BIND 10 trac878, updated. 137d1b29b6063f4d1983bde07f6ec5404f67dcee Merge branch 'master' into trac878
BIND 10 source code commits
bind10-changes at lists.isc.org
Thu Oct 6 14:35:46 UTC 2011
The branch trac878 has been updated
via 137d1b29b6063f4d1983bde07f6ec5404f67dcee (commit)
via 85071d50cf5e1a569b447ba00e118db04293475a (commit)
via a9b140ed88b9a25f47e5649b635c8a19e81bfdee (commit)
via f5d7359a945241edf986b7c91c0ad6c7bcf113e3 (commit)
via 5f5b2d7a98eff5dc74f74b7018f50e286ae82c2d (commit)
via 7209be736accd15885ad7eafc23b36eec18c2213 (commit)
via a5eeb73116cbc74f6bb3fb4a06b99396a8ceebcb (commit)
via 743dad9408b0a86052156e6a3d4fec1001600017 (commit)
via af927e2c390b49012b276c11991a3f7ef3a592a9 (commit)
via d267c0511a07c41cd92e3b0b9ee9bf693743a7cf (commit)
via 33c0d21361655c08b274c75736b7bcbe99dd3d2d (commit)
via 956e210239d46bebe4574c5ca38b3b51b1bb7c65 (commit)
via fe76209cd8ad96144f0e2fc9522f5fda1d52d9c3 (commit)
via eb4917aea94d78ea64fa90f0c70501bbb6d48b37 (commit)
via 85ac49c5282c231c71b8d2046889d22b0061db08 (commit)
via ebeb5ead60c5c0d7b16478498b78a8f1ef3b71c3 (commit)
via e38010819247006d20532d24de8dd6c37e0ca664 (commit)
via 00f4c38428153bb5ad99ba1cc40e9a204266dace (commit)
via f7bb760f4d8290d52959ea83b090d1877e4ac9ee (commit)
via b29c5e5221b8e6a9ff65a0c39f14c04afaed5c44 (commit)
via 9e17bd49b426ffba00312cf90ec80d178a20b964 (commit)
via 519720d9c6eb354a2e31089f1c7b8fd0760053f9 (commit)
via c6babcd3e44bc42fdb090d3a4837848d8c7c149c (commit)
via bb444bae93e8e87d1e62214b1819fb73fd7634e4 (commit)
via 8fe024cd171ecf1610419abb70e5d613b94ba5a0 (commit)
via d36eda71276b43e4281ae53fd558155725f4d4eb (commit)
via 32f075fa288dc5ea049cbf72657386889144bd12 (commit)
via 471edfd7a86d91f04536bc7c7fb42ad7239e1731 (commit)
via feeddd7e5b966c9445fc4ac97a6526fa792413cd (commit)
via 85e4dfa61bf440c132f4ce6bc73130bc6e91719c (commit)
via 054699635affd9c9ecbe7a108d880829f3ba229e (commit)
via d04acfb82c3425a638f09d2f49208ef86bc7a6b3 (commit)
via 434f4fd17dd3dee1d17e7b2e008f1ab1416d5799 (commit)
via ce8b5fe9567f06f7acba34b9e9b35ad471e2ab67 (commit)
via 34ead9dfeff5f64af36a209cae28075fcbbb3330 (commit)
via fcfe5af9c22c5b666e5ecf646bbe0d9da7b655e9 (commit)
via 1f967a8ffe37f6732dd628d28a13abc442541c38 (commit)
via 3efca5f9b7b7bfeac53044fdd44e5add61397157 (commit)
via a35b62699480e149f22f4e039935bfcf41f97ac2 (commit)
via 9dedc72e89b9ca8ba2c5f3bc562ad9ccd1aa05b0 (commit)
via 7808524aa9bbb424327ac67d7408647cb18840f5 (commit)
via 5b866ef26bd5ae980bb86c494a592ef232552b68 (commit)
via a5387c15e93c6d1925bf4ad0eacdcfd63790c32a (commit)
via d56c782197242e32ccdd23c9e3652ff520f3d58f (commit)
via bd8cb42b61666342ee8bc6c33aed2a168301ff67 (commit)
via 9accf90bb081b057023479f0a86e54017b02cdd3 (commit)
via 9eafb04ee8dbd47022dd9a5e5c1310f88f398d2c (commit)
via 7af1aeddc36a1ac1343f1af12aa29164f1028f03 (commit)
via 15f5d7895a2744376062229cf19593016a773cde (commit)
via ddec42c7a23cca11903ece8f7ab614dcc7e5edd3 (commit)
via d8cac904c7aea4a652a47afb35aceb6ca4808ce8 (commit)
via 433381e5ca62418fc90377d16f1805260b27b619 (commit)
via c8bbdd1d74ac313d8b57d8debe4f7b75490e5df2 (commit)
via e57c5196d3e8dd56b0190799c98b56a5be55333a (commit)
via 06f7bc4b3b69e8fda96f6e626a7dac5b1fbbb233 (commit)
via 0aa4c14ebd1eb0a68c2bcf5c617325596657ea71 (commit)
via 9daa2f686b3bdb03b13e9becf45a722344888cf3 (commit)
via f159ac66aa577889514dc170c87a92c49be5a6cc (commit)
via d6b86a88c7a486f2e5b742fc60d374e48382320e (commit)
via 5ddc441f77a34158039f0328c3ab7c2106b7b3b8 (commit)
via 290e89c515e051dad269f1acbce0b52a541d9c8c (commit)
via 9b8925a4d0ecbd8a09d307dfd56fa15fb8eedcc6 (commit)
via 53314ecb63f3f0f85629b66a228207658d8fd73f (commit)
via 863509e2dc3bf96fd38476d787abb62e0da46624 (commit)
via fe1d6665faf06b3fcc0aaf8ec72905aa4b7ce1f7 (commit)
via 7581a21a7dce1dc6b92ad24293b4269a3531e6d4 (commit)
via 1fd37ae8a4bb25a6e85ffb2158b2ae95fe8cbd04 (commit)
via 8ed3b760c179df435882f2ad96b6dcfad5b6e9fa (commit)
via 3516ab551851273faeeb0b8696695e5f3ffc88f9 (commit)
via 9f8ddd6ee1b73c9403f85b6ef5c85605ca393aa7 (commit)
via 898485cd30084d478e8be688151cd11fb4d492a7 (commit)
via 30f4856101bf23ce155ef0f2ebd1ca6f034d2420 (commit)
via eb4be17ddf3b26c379e3f100cf8e8b0fd4329537 (commit)
via ac06a06d1df9a1cc905b224b79921b0d0ade4c05 (commit)
via 611d0300fb8bb2e87d787023cb5c6030ee07d8d2 (commit)
via fdf02d580f2bb1fbc6fa85ee0edd81a07404d1de (commit)
via a0bb482b46bd05f8c8774bacdd26dc891cb3bef7 (commit)
via cebd7e3562312ade50d972af49239cee7f10d057 (commit)
via 8750dc3ab772e29d7374d779cefb3c8b8c61d2d1 (commit)
via b743e6ba98c8cbb53c45e1c0f59e5a78ba62f5d4 (commit)
via 6556a2ffdd7bdb5370c2f1b3d8c9e8799ef82140 (commit)
via 3e9189a483c0f53eba4f05092c90f7955123f52c (commit)
via 7f5702a379516cee041129c03dd37d67f26d49c1 (commit)
via e60ecc91ad65087c3cff3af479cc455abccbe020 (commit)
via 62bd7736311e166aea3604b8e486b58c1315f82f (commit)
via 9687077033661cf07b6ea2e966299e837a501612 (commit)
via 703d5f36d0102993f311d21e662a28492d8cf7b4 (commit)
via 84d9095c66c765cf78814323597b2e3bbef293d5 (commit)
via e54bc83c4e8a66fd9ab1ae9f27899d70ef82a066 (commit)
via 1a8c86ea2503bffe6dc1f2300dfc2b4efba108cc (commit)
via ed5311a26b7b1368f28191c405ec13da907213ae (commit)
via 493a6449b37b34ac5fe36257b266c229e34d105c (commit)
via 6f6a4cf9d98f2b4550e0949da1e20a7f38440610 (commit)
via 36a53f41a7da580926111dca65652d6389fcd909 (commit)
via 61681dac2023240a4a029072add3a39809ccb7f0 (commit)
via 96dd4d2daf1fb91672a798fa478da0ec8a7ac737 (commit)
via 9354737244e0bb7c22ec684ed652c89991eca913 (commit)
via 9bbc77b6b8381c9a6d831e490a7715ba84b9356f (commit)
via 8023760a5fc6f346cf82340aa50df755b0d0d00a (commit)
via cc0d6e4674fd2e6ebe3775a28ec87fc5c869f924 (commit)
via f9cb0d187f02078b27a0119ce42c83f62461a507 (commit)
via 26691e282b76d74959e63524b280e77b09ac89df (commit)
via 4cde36d2b97a24f03c192a61248545d0180fb856 (commit)
via c874cb056e2a5e656165f3c160e1b34ccfe8b302 (commit)
via 12fd115d2e1ea8b55f43313ac665c32e07f9498e (commit)
via 84ada921a2fe98489b578b6d780c1ad2e6c31482 (commit)
via 763a994cb14bb11ba823831f54d64071319bfac0 (commit)
via b86d51b24e7d1bb4980426c9a74962628c096ba7 (commit)
via 48d5ac59277e2e8b43f697a0d1d4b0991a40caa0 (commit)
via c191f23dfc2b0179ec0a010a1ff00fa3ae1d9398 (commit)
via 8d2c46f19c1b4f435d7b9180ff6c2e8daf78ab2b (commit)
via 80319933903fbdb359ef9472573bfaceda7c8cd5 (commit)
via 8c838cf57adef3c004b910b086513d9620147692 (commit)
via 1378551aa74712c929a79964ae18d9962ce73787 (commit)
via bb7833f2054edca11a32d24d17486f153db00ec1 (commit)
via c430e464860b4460a0ab32454e53918a1cc7444b (commit)
via 39e529c506a4350cd676bf5ddff6d61686e8814f (commit)
via aba10a01b765b472d57112fd4e09a6fb47b49fa7 (commit)
via 9688dee697e9ad279c6542bf164b820e907e526f (commit)
via c1a72c46b572eee2d94ab53a5589c724fcb1fcf1 (commit)
via 9016513b4d19d2781d0b6f2575b490431e04ec79 (commit)
via 13e8bc43e4888fe9e6df7e536ea0b439c6351199 (commit)
via e89895b7e5f3b7074271c89de281e426c53be347 (commit)
via 938f4e9ba14954551fbc390abb7d1e06d38189c2 (commit)
via b0b0da67c915f3c02020397b8dcf6a078a9b3a90 (commit)
via 1ee8ad4a2b092a6edc35c111c5a3b5b761da0dae (commit)
via c943619d223be1158ae8db5223f655343d06785f (commit)
via 0d874a95d3c782b9c663c64be619f449956df457 (commit)
via 2d325650009f46a1f16ef2e7c1f4ed0827db236f (commit)
via abe73e885b980aace1fd0df492fa321bdd35f01f (commit)
via 53d45f54e33d23a5b4df42dc977a3a6ab597f5c5 (commit)
via 338b54ef4631f0d35601f174eabfa10f1541f46d (commit)
via 698176eccd5d55759fe9448b2c249717c932ac31 (commit)
via 41cbf5a91bdfa0b311aade6b05d2f51f59cce978 (commit)
via d845ae918fe8dce6806c3f927a7c101fc0e2173d (commit)
via 7bc93774a449b3f19748a37186db3efcd3d6c537 (commit)
via d5a58bbe641d32257035a6087f18655e7b66d8fd (commit)
via c64c4730852f74fff8ea75730e0b40cd3b23a85e (commit)
via fdf1c88a53f5970aa4e6d55da42303ce7d4730f7 (commit)
via 33ee923f7139cbda7a616a83d572a4358f456e16 (commit)
via c69a1675dd0434db0b99682d14fa7905fcd3af8f (commit)
via 9b23d60d6f58b18da3995dc3e090d7fd63233bcc (commit)
via 4bb4081381b39c563707c03818a0f9d16ef7846f (commit)
via eef5b0eb5defdd22ef5e351213ab66531f788c5d (commit)
via e7f1ead205f2dc13d6fd6e2a28b121794ca281be (commit)
via 638674c480d47cf957a8b4f7d61dda3320c881ff (commit)
via 0a22b98c05bf5032c190fbfdf9fefceac3597411 (commit)
via f59415a8b5ee951dd298eaf8eecaa21e8955851c (commit)
via 4e458fc15b5c236e1cc44565f6af313753e87a26 (commit)
via e2eca96f1876a72fc8c121c9204d49cb7e9eaeb7 (commit)
via 4a605525cda67bea8c43ca8b3eae6e6749797450 (commit)
via 85455b6e2f7063b10bae9938de1b70f5d319911e (commit)
via 66e1420d30f8e71e867a3b5b0a73ead1156d5660 (commit)
via 16cc75f764b6ea509f386c261b472e282cd606ed (commit)
via b2d2acebebc66495b98eef634ce633eb70cc2411 (commit)
via b1f197c6102ae31ded2e4b61103308dcdfa72a89 (commit)
via acb299784ddbf280aac6ee5a78977c9acbf1fd32 (commit)
via 2418922a1389bbf265b02328f7c4f594257c4026 (commit)
via 44a44c0b568dc997e7522292212e0ef02b522f3d (commit)
via 250ce2abb3d6b48fce778b5e0c651d57582aff7c (commit)
via 99be45a44f97942f9327b16aff368f1650994e0e (commit)
via 7592596f7a9f8dce2e5e8d9311cc40c5199c66e3 (commit)
via c24c42a5e29444313efee6528f172ad66452050d (commit)
via 5e14c4caafaa44b92134c5df01b726f435f46845 (commit)
via 05eaa177051b212669c2a7b9e2194c3e9ba47f14 (commit)
via 9797d47ab90761c50020f78d5a55fb2672ffd7c0 (commit)
via 000164d51a974acf3846a6b0a7795f484e915161 (commit)
via 0b46c391a973bb8d3f0a1681eb0a79e8a196f0f0 (commit)
via 5e5743ecb40da81c4e8ad27ac8b158c9a7aaff87 (commit)
via 9c95bf79406ae791e2f8c7263ff4fddb19d0eda4 (commit)
via 7dfa14ccdb6777ccacb99fe0d716b7d63654426f (commit)
via f0ff0a2f69bcfae3e2a30a3bdeae37b475ae9106 (commit)
via 38816f95cc01f1c7aeec1d42bde3febb308dd98f (commit)
via 0f8868d1ed7d479d05e2a70de67897d133d41ef9 (commit)
via bc03b37015ab6ea23cbec70dbd299c74fb001aba (commit)
via e56e0f7d1ad206f1ebc26e285d82a8e7ff6390e1 (commit)
via 7d2b0148161460b928cf39c7c2969d95d2870d9c (commit)
via 58b843554162e6599ba895c8325985f74adef734 (commit)
via 98cb905a5852321204499985efb42c5a76b9da6e (commit)
via f7a92e4b0336f3c64eb429947657952178b7d76f (commit)
via 3ff9c6c215faa2e1419d4cb67906a1f7772b355a (commit)
via 90b3952ff515f8746ffc6b227695836921bc046d (commit)
via 0372723794501908ae94be9330dcd8577d951f68 (commit)
via 6b27a7ba1c0343725e3d2e9ea7d97426a8f73f0d (commit)
via a8b5aabeb7b56702a85344434d7822a034ff140c (commit)
via 87a3c86e7e132a1ee80bf29b418ad4b61cefc7d8 (commit)
via 8b4f53f245ab45bf07be9b1108fca951133b836a (commit)
via 07b6398dbd11037eb553fc6fcf56dc8051e71150 (commit)
via f0ef6c88066961a038ea1b80face4feaa9a2d17d (commit)
via 8f9f4ece764df4607f695f3f7eb4c421e8ac4c9d (commit)
via 7751d0ac43f1b7186a53ba5dd5cf2eeca6f7dc46 (commit)
via 40cd22fc64c7755efe60cd42cb12851cf3de55a4 (commit)
via ed8d686171f140fd12164d2d34f65b4ab3c97645 (commit)
via 1e32824c93dac7e406d1b35449b42700bf854679 (commit)
via c5d5522f83888a8b442aa7ff17738f3f688749fe (commit)
via 688867daa34ade5075443c77535f80e1d2d76743 (commit)
via d36ded7d95a695f0412f6ccdb59bf55fc600e9d3 (commit)
via b8e90124c19177e0b6b33bd624e244860e2424b3 (commit)
via 5cf1b7ab58c42675c1396fbbd5b1aaf037eb8d19 (commit)
via 17d9827aa40e363650d1698fddba9204f27b5171 (commit)
via 27f447c8b054b17d96abfba431568c1ffe017f0a (commit)
via 219818389cc848dc2d67aff732b9790968851b51 (commit)
via e602f86dae29c62619b0ea8bf2ca69e1ce1b8295 (commit)
via 57f7044d690d38cff90487b5883883a674d2589f (commit)
via 383b6b2891226228ddf3cfd4c3dd8b17ea186b8a (commit)
via 8cc8f4c008f640b7f13f8f1160261275ec14475b (commit)
via b6dd72042939ca62d9ceeb80385eedc7c5f0560d (commit)
via 31e010330189f489c624b7cdb812ef3f33f8e280 (commit)
via 70bba1b3f811261fcef30694568245e83cd64bc5 (commit)
via 6c5f8867a45f40411594372bca09c04ddf5c0002 (commit)
via f1fef139dbc592aa4c7071d47e38e14487ab72e7 (commit)
via 2c8b76ed408547789f2e26ad76773e40e316a392 (commit)
via eefa62a767ec09c20d679876842e15e9d3742499 (commit)
via 58845974d57ee0cd0b261b00d1ededccc7bde105 (commit)
via d49e3c5e79e00b59e518c4bc1f71882adf721696 (commit)
via 06a24c688282b61dd2ce5b6c00608bee34ae3563 (commit)
via b902e70583a9dfb1ee410e297e2da4c8b944ba8d (commit)
via 09349cf206ee9e68618713b97e621b7ef2a6c0a9 (commit)
via ff1bd2a00278bc753a7d035fd5020ff936df1882 (commit)
via c89f3a2f43fd7fe70bcb199fad0ccf94364b1ebe (commit)
via 4c86025464db4603ec07490169aaf4b77868057b (commit)
via 842fc917163f0b8cb2a703a4c7fe078d944932e8 (commit)
via 0eb576518f81c3758c7dbaa2522bd8302b1836b3 (commit)
via 68cf1ccf20ecfcc1e06de69fcd50d13cf8b5e1e0 (commit)
via bd0c874dda60a0f5e235b653e1bb63716cb385f8 (commit)
via b6709a7001e4812c4ed774ef0ff3111fb654d199 (commit)
via 9b4326dc093b71bcd77a527111ea6778795bf068 (commit)
via 2c5b2fc19c21dd12747eb960baee65759847a118 (commit)
via 0aa89cf84c78a9ee8b97a51c17b3982324021f81 (commit)
via d9dd4c5a7438c152f6c9ae2bcc4c9f5ee598728b (commit)
via 03da93322b956e003882c09a8d4ea949f790dbc4 (commit)
via bfa93c0ee79935bf37d379065e219ba0afb0c4e3 (commit)
via 7a061c2e82d62e2b275cb5a8d7460dce7d36f050 (commit)
via a6cbb14cc9c986d109983087313225829f1c91fe (commit)
via 7cc32b7915532354ed7e2fd15f7ca5a9b9b64610 (commit)
via dd340b32df88083fdc17f682094b451f7dcdf6d6 (commit)
via 30c277567f64d09c11cadcb173eef066efdaea07 (commit)
via ec2793914d1090db8c8d94a2f9b92ed97b1a6cba (commit)
via a59c7f28a458842b4edce2d6639639b17a85eb9f (commit)
via 766db4a6100e34e6a29aa9c849b60ba80b551389 (commit)
via f7b5370a9bf82b0b480b75275349d8570ee83c4c (commit)
via 12d62d54d33fbb1572a1aa3089b0d547d02924aa (commit)
via c38112d8b59bfb6e73b5fbc637fa9eaaae42c52d (commit)
via ccb4c0aa696918c579a0b80448fc93606152ec93 (commit)
via 0fa8006ade38ac7206ff57934f3bb866be6407a2 (commit)
via b25df34f6a7582baff54dab59c4e033f6db4e42c (commit)
via 715fee7daf2f966261d997e1b39888f14fb28a45 (commit)
via c3424869801ea8811106f8f97928ed5cd71efbff (commit)
via 4e544fba3459913e23f86dc5e628665bd288c483 (commit)
via 259955ba65c102bd36ec818ca4193aab311e983d (commit)
via 1f81b4916fa3bd0cbf4f41cc7ad8f13450aa6481 (commit)
via 6d6353cea42ed088df3c2c90c4c2741a1b8b2871 (commit)
via 7efa61c40b94d3234dd7fc79a0fc7ae0f1b0a105 (commit)
via 5c3a7ca7b3b28a7a163b0af3cbadc3d8fe7a702b (commit)
via 54c6127e005c8e3dd82cd97d49aca23f5a5d8029 (commit)
via b6261f09b53af42a26d88fd50d74ab1e84524cce (commit)
via 8634aa9cab1c2205629540b4d99b88847148bd80 (commit)
via d1a1871cc6c93ababba62f42bcab5205320b8867 (commit)
via 2a5c5383e3df0e625367bf85b740f62bf777b211 (commit)
via af10f1ef696ee94f817bc389e0e8b6cd08234333 (commit)
via f16de89251e4607eb413df666a64022c50478a4c (commit)
via 3eb0dedb8a5d9835b394484c6112a4b2fcbe9d51 (commit)
via 2f8c4b3da6060a9b57e944726dd61cb1b2a19906 (commit)
via 4e93ba217318854742144bf0b8e30f4c3614db92 (commit)
via ee468e8f02f1cd1bcf09da75170ed62dc230b70e (commit)
via 433f29fd44d8dd6c940e49ee2657b769d70781fe (commit)
via f0274b7451761b2dc48c0be148ecd8563c9800da (commit)
via 45ef63790b34ebc2d26081609bb168aefee800dc (commit)
via 38d80ef7186ac2b18ed234a825894f5f78fc90b1 (commit)
via 88bee2515653d3b5481608bc92a1956c7ea7cf48 (commit)
via e9286ce511be095f2b16b1b7bc503b1e4377593d (commit)
via 723a6d1f333f1d513d5e4fe26a8ee7611767c9fc (commit)
via 88fe1bafce118f40d256097c2bfbdf9e53553784 (commit)
via cbf08d56345922d754182b941b84b18bfddabcda (commit)
via 84a95705e1e8219187e75433baec2fd2fc8ba2fe (commit)
via aa5fd84d438cf165c9836fa545d15c33781401af (commit)
via fac67afceead36ba7296e194942811d9ed3b437b (commit)
via 90b740caf4cc5d207dfa2ac98f1c73d9818792e2 (commit)
via 0ea828cb5c74b0f9a254aeab2c7d31ff214371e5 (commit)
via 170a0661dfb17014a62cd2eeaaa99e408bc55a14 (commit)
via b12f4e55007ee2e8130991f322e782bb31a8a289 (commit)
via 18083458382473b414a3fc7f57623d2241f487ef (commit)
via fbe4ee1f76237fdd586638ce1ded4c6e5bd0bf1d (commit)
via 9c53309978b4a4bf684b3abbb853876c5413f875 (commit)
via 8ee5844e8dc3ec7d99a5890bdc85f54afd8886b6 (commit)
via c9ad781ebbaebb2e57956ac9eda542eaa88a743b (commit)
via 9f441d72a245e3ccce2ee014adaa0ad62e7b0d29 (commit)
via 51c4b53945599a72d550d7380c7107e11b467d5c (commit)
via 4d39f72b87677c194d282a9e93de67dc0adfb4f3 (commit)
via ece8bd155e646869b10fd08817ee7cd71c699c61 (commit)
via b59f898456b33294d71a333d3f3b4fe9dc81e3dd (commit)
via 84d7ae48d44e055cb16e3900cf2c4b2262f6a6da (commit)
via f8b10842465d60483e3bc9827e06115ea8081bfc (commit)
via 06341cb6cdbd5ff57c376f7b0b25aba4a35bab86 (commit)
via 61aaae27e12db2a00cfde674931e5080e733e6b3 (commit)
via 3089b6fd6eff650dc06c0698b80eae1595986677 (commit)
via 3a9dc4fbd7dab867829ba3299d86c2f5b58d864f (commit)
via 5859f177250685fbd49c9562ffc3e984b9d5ebae (commit)
via 4948e0c8965c3d39b6e1bcb1bdb12b9615260a27 (commit)
via 59e2ceaf7b75c38391c518436a70ac3d41b8c8be (commit)
via 4e3c6c5e5b19be3a0f970a06e3e135d1b2fae668 (commit)
via 03e9f45f8a6584a373f1bd15f01f56d9296c842a (commit)
via cb4d8443645a5c3e973b4e2477198686d8d8c507 (commit)
via f847a5e079ceae0346b84fb320ed06ce9b443a63 (commit)
via 05512e090c6c3cb852cebdb85ae7c12e8001603b (commit)
via c35f6b15bb6b703154e05399266dd2051ef9cfa9 (commit)
via 3f2864bf1271ca525858cf3e1fa641e3496eec59 (commit)
via f8720ba467d8e107c512160a5502caf9be58a425 (commit)
via 38af8a4225e8c82564758e8a5629da438220bc87 (commit)
via c5e0db2b7d8fbdb13548e01310f623f131ea0e9c (commit)
via 26c7bfe851f00422beb442a77d25cc0887557b79 (commit)
via f5239632a06383f2b4f6825cb6a006ceb8bea417 (commit)
via 680f05c35753bf1f70392d25b1e6310cf46476ce (commit)
via b12351c21ee92a13536aa89331cc73bd166dbe5f (commit)
via 2e1dceedf6a4f661a8d7e57757b28f9f6cb1a9b3 (commit)
via df69ad0d0231218610f68ecb2b1953ae7f28fa68 (commit)
via 5b713ea8e5fd35fdb1ab7ff953e010ef9b60f98c (commit)
via 02b2e71bdc1564f4272869bb5676727af809870f (commit)
via 8d1942a3b7516e8161b7f54888da2a4a4d27484e (commit)
via 856ff83ad2b97c136de1103a421547bdcb332e74 (commit)
via 7cc9b08f18967fa1a694f5b7e320aad62d0d3e88 (commit)
via 25e56e5d1bc9197e882e3a42285d0efad21a51f2 (commit)
via 87d2a8766e610a0dece7d86268ac9be4122d6d82 (commit)
via 64ac0166d5ea3b565f500f8a770dfa4d7d9f6a28 (commit)
via c86612cd4120b9ad3d00978c04ea252e7d501e44 (commit)
via c1c2ddf5be4556e6e8cd52a314ddd6d026c7e540 (commit)
via ba50f189eced101999efb96672179aa1024204e9 (commit)
via 6906362bebdbe7e0de66f2c8d10a00bd34911121 (commit)
via 83a58b817e5c0432d543b66208f502b059fdbe13 (commit)
via 40126733cc69634035b0cca3a0c90ee3a606ea3b (commit)
via bcafb8b98d5df77108a83a6bd8b7746f7c2616d7 (commit)
via 4ef59f25a452f934408a9ba837cea9b7fab0be48 (commit)
via 3d069e2745070bc23f14c845cb7d8116d919f0da (commit)
via 230df584722d08705f2cb3b99940b764b1cb7865 (commit)
via fda403b09887a24403c3a90d7ad6c95288f2d641 (commit)
via 88095bed9cbc3e39c61eb0ea7dee1646ff13ac7e (commit)
via b557ab47f3355f5fc7d4f87dfa9e4a15e7e9f3e3 (commit)
via 04b04226b726b6e1fea6bba970556b9ed5cc3446 (commit)
via 3a838eb454ed0de4f073b99e94e02014eca63a56 (commit)
via 748c3e1aeb833012a19b651af7d98757a8ffc50f (commit)
via a0e04c0ad837b4b42caf139573f2a95c86cdac76 (commit)
via 4e12574323ca3db3e985acee0540c603b2b33124 (commit)
via 3fc53ba91b92ad40ebbf46272f57a45e3d2e3a27 (commit)
via fcb2409598d37e2078076cf43794ef6c445ac22f (commit)
via c6d2a365580709981852007cd0a9a3b32afaa5c3 (commit)
via da8bfe82aa18a67b1a99fa459f48cea89ee2a41a (commit)
via 7980a6c8e598d34f5f733f5c6c3ca83c0a0f1187 (commit)
via 9c62a36b0ebf9ff4ef3dad1f4d91195d301348ed (commit)
via 2ec9338d84714ea670ee888f1edf5a4ad220ea9a (commit)
via 1d907966f7f0fe7089efe46d8b808d9115f0d167 (commit)
via 93327a85ea63f7043c49a0af2384a1e274ab1dda (commit)
via 75e756cdf9d5b08e859afac5cef38bd818a90e60 (commit)
via 778bd1be6ced7f4a135e2a6bcc7414c4e4bdc27d (commit)
via 38c8e9a9ccfd7fd57bc5fa5090c86cf7b7920d28 (commit)
via ddf9da5175b1182810838861f1464fb05fe00104 (commit)
via 8fe581570c2ef4f881762f4f22ef4f66c1063491 (commit)
via 2812fa5cb0c2013ef1696888651390aa71a76b4a (commit)
via b131dd71ce147b4efcece9dd8fba16c51fefa492 (commit)
via 84d83c1d8979e2906971af79f2e41083299beb7e (commit)
via 255bf5b18e2b0e28a65062e87dc2d1212376bfc2 (commit)
via e2ada81cd2a090f707147abdb73a90d44db2f2b0 (commit)
via 0953b3f5d7ed1b4a25362f9a2d1a41eeeda8efa6 (commit)
via 8d380bb47dd24c7fd2c4880a4106835d871bf4d5 (commit)
via 77ba8639c274865c762eee688383c321f18ef889 (commit)
via ecf3f4b962026aa9094ee321b03ee32df2fdf1d2 (commit)
via 30df43575158b0cb294ec49a8463fe8b49593e62 (commit)
via 4c0accf0a591b0422c84216150e1b9b4e008609e (commit)
via 1f051716bce3d7aa2545722ba41958df9758cadc (commit)
via 10553ed4ebb5b949ae74d277d398d2e8a3909ea5 (commit)
via d916aef6af6bb8506b1ff4756054a1697410982f (commit)
via 4700bada6282f5ad10b53cd8ca7cc03b8fea791d (commit)
via ef64723fe9638f8d56f58fba44a149ac620eadd9 (commit)
via 5de6f9658f745e05361242042afd518b444d7466 (commit)
via 3f847f9d35bf2bf9ee0d957ea1aa9ffb27a32cdb (commit)
via df047c5ccb5c81f9a3d36f7fc38a19bc7c8f2ac2 (commit)
via a7346d50ae5389ce37e35a7131f0f218663b8c68 (commit)
via ad91831c938430b6d4a8fd7bfae517a0f1e327c1 (commit)
via 43da3c6c1cc7cb5fcb1dbe2f983a53e883408d1b (commit)
via fb3d0e1146e9e5a36a9402690a09e7629408c677 (commit)
via 27b3488b71a5c3b95652eab2720497d6d055346e (commit)
via 087c6def9087019640a437b63c782a5c22de1feb (commit)
via 3b0ccfb2f23961e4cbddb9d0873bab0f4c1d4c3d (commit)
via 0a39659638fc68f60b95b102968d7d0ad75443ea (commit)
via 2684301690d59a41cd20d131491e0714d156fa7c (commit)
via 5baa7aa73ad8d8d5250990a9e330b9b746659452 (commit)
via 1921e1297dfcb878b9417edefe4d87639c827948 (commit)
via 872bd5756ba8b5daeeacedfcd4ec38bc50035ec8 (commit)
via 67d8e93028e014f644868fede3570abb28e5fb43 (commit)
via 4ff5e524a7f79ad7f4513ebed3ca0990392263af (commit)
via 5157876271e945703ee699f07442ee1a72bba362 (commit)
via 73df015104eb5ac8934ff1176c24079e6e9b09c3 (commit)
via 586d49827ebaa2cf2c70dc030c5830afb1fb89f5 (commit)
via 2b755575c9d0277980008df99f92c38dd6b3a420 (commit)
via 38d1a8aa943424e1a0de0503ee8aa961a95d0e14 (commit)
via 4579a2a9f43a38144539447bb5076bfcbaf8b6d8 (commit)
via 58d7fda0fd2efc2d4bccfdcb55ce6ba42af83aa0 (commit)
via 7f08fc3123ef7d26a2e61dd29455c07510404a7e (commit)
via af6328603521584ff62b25a6f86a923bba5a4f5e (commit)
via 9d48d1964569b49be17afc3e20085a23544a32de (commit)
via 28988a78d3b80c7f1080fce696acf176b74a29fe (commit)
via 5c6391cca55baec236b813b4c2e2b7699595559d (commit)
via 08b5add9a6e405342c0c8bc3bdf5d552ed45df0e (commit)
via a176724d99c073f8e547dea2675a5b7d1df70515 (commit)
via a9b769b8bf12e2922e385c62ce337fb723731699 (commit)
via 6318db7dc90cb6656cc2a1f8e875f2258f6a4343 (commit)
via 35a0136d56de7faca280666ba40bb1b87a85fff6 (commit)
via b97162729a3ad4214e5f6b85452a27904b8f34ca (commit)
via ad36c799ff07d47ebd5c861c63e9feef50408e34 (commit)
via 9d3e78f0d8075ad62391ed005e1e82f79f05e2ca (commit)
via c5e0ebf85ef50e61457f3b99a05109a92b328573 (commit)
via 8216d5dbe1ef23d56ba589fe1de619a601bada4b (commit)
via 1c834de994f51a1fb98add648dad49abfea2c403 (commit)
via 9622aed753d953a763a9c0ac25cd7868d257bad7 (commit)
via 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c (commit)
via 7fe505d131d2a13a6a412789474d92493ade65dd (commit)
via 353c08576b5c7fae46be834cb815df744ec2ba96 (commit)
via dc9318330acbd36e07ad5a4e8a68c9a6e2430543 (commit)
via e4a17a0630a6460090c5cdb562e02ba992a74fa8 (commit)
via 954143a2748110c720d28df49159ed4f0bc1a1a2 (commit)
via 8cfa0f76baf92f82bf2865b3557c0a2094e81cb4 (commit)
via bdebd1afa4bf82120c66d9ee8d8cab500ab0b606 (commit)
via 451086b203ef3e4611487630225a7650ad9322e7 (commit)
via c0c7b21ab57bb9445329fed9e1451c534aab6a67 (commit)
via 59add6ec0f7e96ee81a7b9970228b8f795b01997 (commit)
via 1b421982a6fcadebc72d3d6ee7a4e34eec61a25d (commit)
via 45630ca90e823247c429f82b338244a9bba9baf4 (commit)
via 36c6035855db0ae87a64a0d169e0230d936e3e64 (commit)
via 5ece3fe5e40efbcf7d727650475c35850624cfaf (commit)
via d88becea33630677dbb5123cd72fa8695512311a (commit)
via 171088e69ff96a2e242cfdf98e8d1f0415d4c172 (commit)
via 568a8cc472f3207b44b92428e7ac40338d9ede37 (commit)
via 9a8667331d9a7179331516e7bb1f3aa942bf8218 (commit)
via 4c98f3dc47545794daccd4978103f6b98236ad82 (commit)
via 2dfa0983e4680f321a3d4f1bd0d826abd88f455c (commit)
via ec8fed8c805b513ea15ad76eb380c639dba88548 (commit)
via ce3be84b9f772fda5f08947fec92764119989019 (commit)
via d60bb44c243f27053589b5501529b0001404373f (commit)
via 92dcac243a4a2924bab85d1519a0c7a20853f9cc (commit)
via fb7c63f65c121b372b1ea23a823cb17afdcd1dfd (commit)
via 2bd6dc4ac6ac61705517df297320fa79b308b9e3 (commit)
via 58d6de47f6e189ff0b648b4f2f74e6d5df85d749 (commit)
via 2ecb4add323e3c4ba56641d28e35dd79013ff9cf (commit)
via 540c6a5f5b25d935a8193fd835c1ba83dba02fd5 (commit)
via 507b231626a2e0289288f48b1e4613b569cdd8b2 (commit)
via ea8bdd6cb6894855f109b8d19ce104ae9a4b9cb5 (commit)
via 4a7d63179ae732ede6bdc77c393a1cfd9b0b58ca (commit)
via ba9f03b99b6e1dd46d9b11eb1bac629789c8f94a (commit)
via 6ea996c67dff319e332b465ed450ee50b97de4f7 (commit)
via bbc661e3c38f02b4a1fb50bd4e058a22150b0087 (commit)
via 373a792a4706be2619dd1d1820f949858620bc77 (commit)
via f9b1950752ff1d3041d776a5d50ec2d0ddb8065a (commit)
via d63056e7cff35f58898a9bdc8d5cad589689590c (commit)
via fe8f3314300936f71cc89535ecd3f0f3cad3804c (commit)
via b4ae924f504e9749989059a14e6a5dc830c99e81 (commit)
via 20871297d2aaae57acb79e987ff80a9020d608d1 (commit)
via 2384bcf387e93435658ec1ab92addbf28c9ab640 (commit)
via 1d314b2544b8af8a936c90e00a0dbbb605410952 (commit)
via 2bb551be853647c25005d1ab167e17ada7a5bfc5 (commit)
via e3c81bd07046903b4b3bff8325024aafcdb35cba (commit)
via 9001f1db99dfff10957dc2a971e7466a496f0f2f (commit)
via 616fb3be8c0b3c266eaf0aa4ae399918fc7992ef (commit)
via 7dd0238dbd4ed086ca7217ec50d8f0a5be3179f3 (commit)
via 7a9a19d6431df02d48a7bc9de44f08d9450d3a37 (commit)
via d72e84456e23ac19c2c12a186ba429cd2e4985cd (commit)
via deefb84c32a289f8deda6550518a48b01a6032c0 (commit)
via 83f8d6de769a33f51b83cd81efe178db162e95e1 (commit)
via db9e3c398b854c83a65eb227ab9ff40dfae1145b (commit)
via 77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
via a030033e5a53dd18157509c6c101340688d16011 (commit)
via 13e236a3d647d15858b061c7d96288bf7407e090 (commit)
via a7fe0d5982813f092f8a497d350620c02b995649 (commit)
via 485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
via 6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
via 7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
via 745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
via 1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
via 44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
via 006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
via 68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
via 09e8c50958a1fca313c2be427c2991c39798f90f (commit)
via d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
via 98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
via 0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
via 9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
via 691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
via 6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
via 5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
via ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
via 6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
via 9bbf7837ed869bfa42849f433367b0471bf7bc58 (commit)
via 810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
via c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
via ed6fc7857e3fe7d64f19a0bed27226964009f095 (commit)
via e074df43e95dc002374de30503ba44e203b04788 (commit)
via b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
via 56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
via 4cbf309be8a302afe3bc041da11c24b593464157 (commit)
via b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
via 3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
via 6df7102965c6afdec6f621175f9e91a56ee42a67 (commit)
via 81613a741bcc9cbe909c814fab9ca99c1a1fc2fd (commit)
via cc004ec0ff327ca300cde89ffc252a9b1c588bec (commit)
via c454dfae8988337bd10bfe0551ee62a267049dfe (commit)
via afde75c1fe9ab3fa35acdf1a3b5f80ec389e1190 (commit)
via 5de7909a21a077238567b64e489ed5345824b2a0 (commit)
via b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
via 3ce7b09732207eac03998fa5e267672760e475c9 (commit)
via d9f4f26b0f2c73eddd07b2a4368ae1b238944b80 (commit)
via 59c8ea50e972e7753c96f6bcf46fec48e694daa2 (commit)
via 0f7dd030eb47912112b8774424a62c5561af16a1 (commit)
via fb441884baa9994093ed380aded84e707c3d34b5 (commit)
via 6f5ca0bd47ff6a9b1670f38d6a68a1a7b1a01a5c (commit)
via ee552335b8177318be98e6a4c5d941aa41091a2f (commit)
via edbcbf0ab15f140b96efab5fae808b35e705cf67 (commit)
via c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
via f3e53fe5cba59946ddcf24be423eece1ab596769 (commit)
via a51d6b87331f0fc991b9926a9101e081668ebbcb (commit)
via e0215095818d30e80b59e99689f2cf0dfbbae841 (commit)
via 10cfb9ccd5b2eb489b14804e0ea9a73c80e697e6 (commit)
via acb5dff4449286422f23a7d5867b3bd792c888e5 (commit)
via 253d1fc351fffc8a0b1d325044854a2defdd7223 (commit)
via d7834356a301b162fb9757427359d0dbac95cecf (commit)
via 004afad6ea3fba7c8dd7730428b50fd770daec66 (commit)
via f20be125d667bceea0d940fc5fabf87b2eef86cd (commit)
via fcc707041d663b98c1992cdd1402cc183155d3c0 (commit)
via da5d5926cb26ca8dbdae119c03687cd3415f6638 (commit)
via 0314c7bb66b85775dea73c95463eed88e9e286c3 (commit)
via b8cecbbd905c10d28bcb905def7160d9e406dac4 (commit)
via 7a31e95e63013a298b449573cc5336bcd64a0419 (commit)
via e18a678b62d03729f065c40650d7183e2f260b22 (commit)
via 1d1a87939a010bd16ed23cd817261e9a655bf98f (commit)
via c6948a6df9aeedd3753bc4c5e3a553088cd98f63 (commit)
via db0371fc9e5c7a85ab524ab7bc0b8169b9ba0486 (commit)
via e906efc3747f052128eef50bed0107a0d53546c8 (commit)
via d86a9dceaddf5a2cee44170e6e677f492df5e0ea (commit)
via 4c2732cbf0bb7384ed61ab3604855f143a0c6c5d (commit)
via aaffb9c83c0fe59d9c7d590c5bea559ed8876269 (commit)
via e8a22472e58bfc7df4a661d665152fe4d70454a6 (commit)
via 2c22d334a05ec1e77299a6c55252f1d1c33082af (commit)
via 8a24b9066537caf373d0cfc11dca855eb6c3e4d9 (commit)
via 7275c59de54593d3baca81345226dda2d3a19c30 (commit)
via bcf37a11b08922d69d02fa2ea1b280b2fa2c21e0 (commit)
via a142fa6302e1e0ea2ad1c9faf59d6a70a53a6489 (commit)
via ae8748f77a0261623216b1a11f9d979f555fe892 (commit)
via d0d5a67123b8009e89e84515eee4f93b37ec8497 (commit)
via a9a976d2a5871f1501018d697d3afd299ceec5da (commit)
via df9a8f921f0d20bd70c519218335357297bffa7d (commit)
via e95625332a20fb50afe43da2db0cab507efe8ebe (commit)
via 28cad73dff9dae43a38ad7dafbee406c690fb77c (commit)
via 4de3a5bdf367d87247cb9138f8929ab4798f014e (commit)
via aa108cc824539a1d32a4aa2f46f9e58171074a9e (commit)
via 691328d91b4c4d15ace467ca47a3c987a9fb52b9 (commit)
via c06463cf96ea7401325a208af8ba457e661d1cec (commit)
via c074f6e0b72c3facf6b325b17dea1ca13a2788cc (commit)
via daa1d6dd07292142d3dec5928583b0ab1da89adf (commit)
via e7b4337aeaa760947e8e7906e64077ad7aaadc66 (commit)
via 0b235902f38d611606d44661506f32baf266fdda (commit)
via c19a295eb4125b4d2a391de65972271002412258 (commit)
via 9261da8717a433cf20218af08d3642fbeffb7d4b (commit)
via d4078d52343247b07c47370b497927a3a47a4f9a (commit)
via 1aa728ddf691657611680385c920e3a7bd5fee12 (commit)
via 1768e822df82943f075ebed023b72d225b3b0216 (commit)
via 326885a3f98c49a848a67dc48db693b8bcc7b508 (commit)
via 3e0a0e157bc2a1ca7ad9efb566755ec61eedd180 (commit)
via 93a7f7d1495795b731242e270b6dc76b1ad6b0dc (commit)
via 87e410c0061df72fe69fb47c7456ae54c609b219 (commit)
via 1ddc6158f7544c95742757654863379fff847771 (commit)
via 0f787178301c7cbf59fc7c516ebe920a33e22429 (commit)
via 9b6993b6f6507fab1bc8956f727cca60c8c9243a (commit)
via 7bda7762ab9243404bbd0964908b3365cd052969 (commit)
via 7cf7ec751e4f776dbb60cd290cea4fb217173cdb (commit)
via d5ded106a85afaf695e59941bd382bca4811fe46 (commit)
via c4ef641d07c7ddfd6b86d6b5ae944ab9a30d6990 (commit)
via e443a325b31edefe9cd4da71e10497db6544468c (commit)
via cddcafd790288f5e666198effa142132b6fc43fa (commit)
via ab5085e81007711f9d18ed77f3d78f51cf37545c (commit)
via 5e621bce015d2847104303fba574989fdf0399e0 (commit)
via 7d5c3d56743fb696405f509663b3e1558fa72e25 (commit)
via 990247bfd2248be5ae4293928101eec87e1997e9 (commit)
via e9e36557849ba6b650e503841596bd31034c1936 (commit)
via 39a0a5c65d0802f40ab428474b1e6d981a91fbce (commit)
via 0c9db8bbeb7187218a5b47d82df18e38128d06a3 (commit)
via 9882d600d0bbbc115671b12646e690ccddbf5348 (commit)
via 59b545e90d30444a97c8e925569d240c819d42b4 (commit)
via 7e89c625c5d12b5816c857d0c0910922f8803f82 (commit)
via b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
via 978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
via 2e60562cfda15fad37550ce5996e942084131d1c (commit)
via 2f49e3eb0ddf31d601184b516b7f44ab4ea6eece (commit)
via d71b7da05d3e1a82047e35c2720c759bdc0fb44f (commit)
via a577b387b7e5c9c8afd371767fccc85009e84485 (commit)
via 8e82cd7374cda9ef55f88504a94d31b06d7e1bd4 (commit)
via 1351cb42c92cd415003adf6234d96507c8a6d2db (commit)
via 575bd3ec2fe918257cb448eee8ebbff269d85431 (commit)
via 17a87c6bb9d16e992fadd47b11b3eb26af54ac69 (commit)
via 2cc500af0929c1f268aeb6f8480bc428af70f4c4 (commit)
via e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
via c46b0bc28c22f2ae4b46c592f450e745774846d4 (commit)
via 7740b9810bc093a9083e8c3404afc627c8b78242 (commit)
via b51f0cfcedc2499aa1c0b85aaebf2fecf244c291 (commit)
via 69eb1a250699f481427c2d12abf14314fee9e6eb (commit)
via 62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
via 005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
via ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
via 94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
via 0edf51aca0ea2d75ed9d96fb612c1005965ec64f (commit)
via ba292defc14029971d5e9043881ddb98c994cfdb (commit)
via c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
via 779e145d8f15ad9975f6ca689e6a595ea0a3de4b (commit)
via adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
via 80014655d76e758868e8e1ed36472be9a606eb2a (commit)
via 959dc163810ac286e01d0163624f5bbad5b82c55 (commit)
via 1d74428fb7a817790c397338db92d102e2113e1c (commit)
via d5e24e94bbd581098e460fc3a0b437478340c876 (commit)
via 4cd96de7e7d4ac12c38b45efe7b3ee0ed331d3b9 (commit)
via 914fe9bc05003defeff70acb84a52e86fb9ced4c (commit)
via b22882ae78f0e5d38d4b6ace0725bf0ae5bc4803 (commit)
via f83a47d3c260982be4918a3d9f5d0b480503b131 (commit)
via 8c84a6865c4b09eccf41c9d2e91a030c941bffab (commit)
via c6ca831b3f171da96fad75c21dffbd2bed71e297 (commit)
via 8ce8e05a403440e7f2323e9d43dca08be1cf8a94 (commit)
via a9ddfa91f81e00400f04548e71ab9519892a6dea (commit)
via 04749187843604f51ddcab4f53811dac9a9ed8a0 (commit)
via 414b25d4bfa89e0609cd3c8c3a6e610681f4c929 (commit)
via f57e8133a7af31a59578ac2cd50dd20418cb8fbc (commit)
via 85a14b1daffb3a20e9e510b73d25c71ba95cc350 (commit)
via 774a56a8beeef3a73258910b12cace20443a1bcb (commit)
via 89bd1bf64a6d745f4276fce3ee8fa4e050736ff1 (commit)
via f429202995ebb0dbc86d41c6d707815186832063 (commit)
via f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
via f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
via d719b47c4131e2120305cee60395c0a88f5aca25 (commit)
via c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
via ac15a86eb62832cc22533bc33b802ea297666ad5 (commit)
via 0af72968bfd192fa418551ae75def455adcfbb4b (commit)
via 977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
via d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
via 0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
via f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
via eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
via b19a36e30d0d3829c68f2e0300ea1487da242af8 (commit)
via 12b3473393fb7a471fc7d928476b0ba66da145e9 (commit)
via cfd1d9e142fa2fd8b21f74de0e4a0109e0a04439 (commit)
via 67b352b3f7cf736c9aa7c1332aa7814911556ad5 (commit)
via 822a00aee0d7feb845e28dad7dccb552d10d83db (commit)
via c293f639684d2c6625b7395c995aa813eafa5fa4 (commit)
via 00686a614cca93f007335d01c06d78cfd212d973 (commit)
via 5951ef6faaffcff62d9a9963260a932666e3decb (commit)
via a36be891057f7a2505db032768264c79f37f05e7 (commit)
via 23b1e8bb169e058dfb11b826b1b59d606d64ce20 (commit)
via f82dc7b09f470f79ed2bf099216fa64c76528d3b (commit)
via a53c7d7c450de09ceb04b47cb59450225827bd51 (commit)
via 5b7dee0548f068e626c0bf5d116fc506d2af92a0 (commit)
via 7990857c32cbb49f4bedf805f86c1b718b3a70d0 (commit)
via a03d7d9aae8ac258d266c66c62c63e03ff5d2558 (commit)
via 5d6fde4aa0d2a93945276dd722be48e05da72faf (commit)
via d14895917e4841ee53c46f7ab3f46c3f19489069 (commit)
via eb5023d2a38e0862e2d9a5f1ca4a3788fc131405 (commit)
via 1aa26c98d1b827a80bad8abd7f8bb25c26db72b7 (commit)
via f6a1807c25d85a0ca762bfa276ebac4a3430e7c7 (commit)
via 20483389cb90e4f46486be925b896c8a0438191c (commit)
via 4102716ab6f3cfaa979151029c2859701dfe2ac6 (commit)
via 8975d286a6de827a02b073de32570602cd9cffbc (commit)
via 65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
via 19c8c07e6e1601180f85f7aad145f00112f3f8a4 (commit)
via 87090907f39983b744749017cdac3fb957d8d0c0 (commit)
via 2808941eebec54dc7c4981f5a2a0e149d452b8ca (commit)
via 10b192574ca253331298bbc4b05ef70d2cb927d1 (commit)
via 9351dbcc88ccdd6aa83d72f432f19a76c031124b (commit)
via de06b256b36f6428c5d914266c4e91c25c69ded5 (commit)
via d4867b8dd18ddbee0b30040f569eeac99964343f (commit)
via b5347a6b22c2d82ffa57c8302c81ee0f25b413a1 (commit)
via 848cfc635084c5baccb275ed4995032d3ada2d59 (commit)
via 46b961d69aff3a2e4d1cb7f3d0910bfcc66d1e19 (commit)
via 52357dbe51bd015119a798a4f8e7244a3e1efda4 (commit)
via 97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
via 56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
via c9d7e29600f7a80094bcda2c3bd87d8f07d813e9 (commit)
via 2b6bcb84a17fc98ea0ea87df65e6a77829857ecd (commit)
via cc6d6b14603924a4ef2d86dfaf758447cca6a7ff (commit)
via 69642fb8f55cb4741f977d3fbaacd5d12d742625 (commit)
via 3027ed2010e5e27ef6e8ba519b789269100f442e (commit)
via fc33ec0a47dce3e94fa7179d4d28d7fd050a258d (commit)
via 86257c05755c8adbb19ce684546b718dd48a5ef8 (commit)
via 5f13949918d125f851bd2ba8ab092c301835d3ac (commit)
via 9a98be99edd71e540bd65631dcbd3d766f93056e (commit)
via cce2a00af57ef823abeaeff787eff35f43dfb093 (commit)
via 7e1e150e056d0dcf5a58b2a8036f47c2e5dac820 (commit)
via 15428e5a9c1bb01f5e7a04979c17ec5f1de9d1db (commit)
via ac9fd0a240cbfa8c448cb01bb69ac92313eb7e56 (commit)
via ce0544bd0852415891cb31e0c1b7d0ba0b3d19f3 (commit)
via dba1e2c7884b5bc68f945fd5d2dd500f9a258c6b (commit)
via bc281e8b48c92102d3c64318e07598c8e96e493c (commit)
via 82667b0cdd6592053f5b2f4cfa1cbd0ec92db0b2 (commit)
via 71b0ae9ddbcbf4093900ff879e2e1c82be89867f (commit)
via 1b96c2563342098e05ac4b240c66e60222249cf4 (commit)
via ff14da4f9b706a47f152491eae60586b75430c6e (commit)
via d23cde8c4285cf55b007b300123c41fa852d38d9 (commit)
via 885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
via 07cd1647921e0e94432cecb2f7a5413cd8f3884e (commit)
via 82348d8d9d266d91e570c4ae8d8f1afd3315178a (commit)
via ee2a86bd4c1472e606b3d59ef5c4392b61d7ab48 (commit)
via efea6557fd364ee42c84c08df28efa9797f1c9c8 (commit)
via 0e662967ac5a6c8e187725828cd20b826ca00000 (commit)
via dc979c6874916221df10de3557db0d1b4a19d221 (commit)
via 925045f2ad19d5dccb7dde77530ea16ea7b6341b (commit)
via ba80991049e1e361d2b1de08160c91e5bd38b728 (commit)
via faa90e91384af409419363aca539709e2985708b (commit)
via 1feeca7c2209819dd181f1fbaaa75026d3e38aa2 (commit)
via d7713e5c5033ccb0b51769d7f28d91619655b24d (commit)
via 928dacfdf443393618edf7124a46c599bd760784 (commit)
via b34e7172b5f663faf3add7f6e72a3e2d8ffe680a (commit)
via 7fbc6a734b2a9e33100e57cbea0ce1d20cdf4491 (commit)
via 9f5c36321d6843ba5b2a0e9e6c10c3ffee7b14fc (commit)
via fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
via 54ef8963e504e22dcf29405412a95100a210efe5 (commit)
via 4db53f3593e24b80a33b608432ef463acbec295e (commit)
via 0b98878ed8a185cbc3b78c860019416bfed317bb (commit)
via 009d45dfbb20a54ea402e7e8f18bc2d253f41ad6 (commit)
via f1d52ff7171da920acc7583fa427a95386312908 (commit)
via 98953e35ee95489f01fbe87e55fe91d9571fcb48 (commit)
via f33ffa77fdcc3e40ec42268ea09b67ac65982f1f (commit)
via ac08c1b86b979574678aa110f19fb744719def21 (commit)
via 747d2952c78ee32acc485946d3922cfe899a4b48 (commit)
via f26298e3ae274ccea3d4bcef37f5ac85da383461 (commit)
via 7489fa475c3f5963323a6b660e4544e48f45d37c (commit)
via f00712037fa4b4cbd0d677d998df3728c0c4d8fe (commit)
via dae8a2aabc0cc9c9f3794276676872014c5a58fa (commit)
via 3cebb4e77088feb357b485aeeda26429f98dce9b (commit)
via 96249117c97e625ec93d94939e9d75fad18ac2df (commit)
via dfc13c8130787ee07e2386773a221524ac6d802b (commit)
via 6ee994f190d58df863c71389bf9f8edd38d8e3eb (commit)
via f240d7d1d55f4ae87bfd1acc9c07a90870f59a93 (commit)
via 1c5a66507b7dc2990709308979354d8e62646a28 (commit)
via c5124556a1a8907a84bb2c2bd1912da0c0aaafcc (commit)
via 19912ea4537e669f9c9ad1108b6f5453025738ef (commit)
via 3702df52de21023d90052afdc54732d9ad285b39 (commit)
via e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
via 823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
via 608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
via da32354d05eb22cecdf9543f542636d44e503a20 (commit)
via c42eef08cd6cb28c898d46c2168c5c08684d5c36 (commit)
via e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
via f17363ea38564867df555b6be9138d2eff28daa0 (commit)
via 5fd94aa027828c50e63ae1073d9d6708e0a9c223 (commit)
via 7b04ab1afedaf73b4492f9e0a9210dc4392ea068 (commit)
via 16e52275c4c9e355cf4e448a5b17136f24324d7a (commit)
via 61029d971895738ba353841d99f4ca07ecf792b7 (commit)
via 4625b640b9b5892da7f35f165407ed3e850353d9 (commit)
via 1c8043e5b50bd47d7734397a08d5015e3672b9ad (commit)
via 9819295a58b8b40ca6d95c84f1f1de08fb0eb707 (commit)
via dc3b856b460ff380feb68cdff551f334e6db5a27 (commit)
via d2f96b7e1e3e4a5917ea73a56429fa645d8ede7c (commit)
via f4620596bd798f3c0e1d4b7738a5c4ca1730cf89 (commit)
via a01cd4ac5a68a1749593600c0f338620511cae2d (commit)
via e62e50f3143aa67bd60c2351ad61d7544f28d4ca (commit)
via be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
via 11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
via b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
via 14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
via b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
via d299036c6ac281d1d6c119c5fdbe603bed404851 (commit)
via e5d9f259dce621201a2c52b56b260f8de776ecc0 (commit)
via f773f9ac21221663bd093806374cab83abd2288d (commit)
via 63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
via 579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
via 25b02eeaa9acda461629d19c4c6c2b20b5850795 (commit)
via e89a3a1302cd3e95403c5c64edb126153852ff35 (commit)
via d9d0d1f6cb6c6210f293dcf5c181024d2df787f6 (commit)
via c8710633f9cad97adc038852319f1a7a22cebc44 (commit)
via c759e90e162192eda89c5046fa446891aac259c7 (commit)
via 21850ab947dbdf98b1d89afc36d8bcfc6001592e (commit)
via 9cc8edcca2ab13145a954b44101f7058142d4ac1 (commit)
via dd7e5d47df1e9af687cdc87c2d2595893eefec12 (commit)
via 8907c6a5c71816483099683e0ddcaf11cf3a7912 (commit)
via 0d2c284222839ff21401cecb7cb567cb0cc04127 (commit)
via 06aeefc4787c82db7f5443651f099c5af47bd4d6 (commit)
via 119442008b97f3b39d0ade075dd219a2f781e2a3 (commit)
via d42d232acb16847ea8ec775854469e3226cdfe17 (commit)
via 34634d2ba1efba222403e8a210379d1573759939 (commit)
via 0373b72ac00aaecb7745cf7fd129424994e2fab8 (commit)
via feae0b934e048b17830f49779b01c48136a5b2bf (commit)
via 8f5f77f8e2819a66de774a4b7f5216ebc631434c (commit)
via ced9ddecf6b8f7777125b8d4d2ef1b24ccad34cd (commit)
via 34cfc02f00196f9f5124172b10de5cc8fea1081f (commit)
via 45dcf93cb43fbd2f52cd432e38a5c17ae2ded61f (commit)
via c18502d5a89af081b1cd4c4b1c112f9458056124 (commit)
via ee4916a2db7ff1217c0af65f03220583b80b4568 (commit)
via 87a4f24037965ae88435ebe3f887750c500cbfde (commit)
via aa9497f4d2346e7a18cd07b9bf31dfb5832031bc (commit)
via 7b0201a4f98ee1b1288ae3b074cd1007707b6b21 (commit)
via ba7bc1e14fcf1a223a9a42ede2e9cd7d290c8b61 (commit)
via c6ef5865b3fd8e5d5fb8c891467b3722fde4d685 (commit)
via 589965360a98152e8c783e4736080e06a895feb0 (commit)
via cb86d16418ced44b148726104c5c8f9d36a3be49 (commit)
via f279d996354eded4defa219a393efa362e157406 (commit)
via 69336de84b2ae1b5b6a59fa8d817daa1108cea27 (commit)
via e05a3418c9d6b3f70cdb387d1f30d8ba59733f02 (commit)
via 12186e267fb75a77027dc046f78db6ace99b8571 (commit)
via c62810c526d75363ed4d668bbdb6b21a5a294a7b (commit)
via 0710846d8d7a38079b9570aeec9abfb94341af79 (commit)
via 9517f61cb8ad4f8074b5e6e33c663ca9ed581908 (commit)
via 3da7e8747dcea9b45c8bc4c17b946be7d5ff9576 (commit)
via 900a3c5828be90bfce2a7b8e2e6edc0d4509df6a (commit)
via d9e757fb15b711464cfc8ba344f2563f3e2b9195 (commit)
via 517c31a58af1f7b97f308e77caeb8cbe9ef99cf1 (commit)
via 4c485d0b112721d3a2b2939ab61db14b7608c98c (commit)
via be388eb699a8517595ea921082b5ded2d1450dcc (commit)
via bf5fbf4c58d67a25c68efea6608ec2b8e89c7597 (commit)
via aa7400d4aa132f50a982739e1e8b9752d418b97f (commit)
via 0711c996f017cabe220dd291500bb1b202f21e1f (commit)
via 9b2e89cabb6191db86f88ee717f7abc4171fa979 (commit)
via 07e015d587c487ce1934144abe59010b8f588c81 (commit)
via 253a3fad875abba510e13a3112b6176b9e272e84 (commit)
via 566d284cd664a78255f5fbc8881ee8996f835960 (commit)
via 8d8c3bc259f8b549a2fbace562afb0984cd427ba (commit)
via af698f41e199e4942d818accb0cc0ad7589785e8 (commit)
via 6300d968db6e857e199cf8e4701988bf2f9136a2 (commit)
via 49d5415d994ab0807daeaacf5e30f9186ca72ff5 (commit)
via 6a204908cb3f11ba7635d5e0a97a196856fb5748 (commit)
via 489f9a3bf2078969f746a47a49fdc17d94f898d3 (commit)
via 7b55eb02488353672fad7160148a40e581cb5c80 (commit)
via 67f6e4baa87b5555f3bc13919707a3f3180d57f4 (commit)
via c0a78a899ad3d96bcfe15715e957eebdb71ecca4 (commit)
via 6ba745463f9f54496a2f9c2b1a407ab40844bbd4 (commit)
via 18d0a74b6464ffbe036c41e706d3130a69a38313 (commit)
via ae1cf18d06bfc92ba1803ad8bb7c90be844f491e (commit)
via 26e04c45efa440353cd75365c499fc06ba1eb4ea (commit)
via 902ad260c2399b597fe22bba461481a09502b9d5 (commit)
via 486bf91e0ecc5fbecfe637e1e75ebe373d42509b (commit)
via 687c2d4bdc959da433c141d920820875b701c2be (commit)
via 640a5da59304684d4fe304f2616e8dcf9f234d41 (commit)
via eff38a97dea5a54b7a9f3e1213cd5e8b2b15be37 (commit)
via 42017c858f5e08f1544620342404904c36d12625 (commit)
via fafb108c231295b40b7b0d0ea86caff5031a0c30 (commit)
via 136adbdab133d19bf900036b3786d5f709ab2082 (commit)
via 10e4a07adce6af4794166cc783eca4fed188cd42 (commit)
via bb79e799885427437f01e6456c03a206886ae9ff (commit)
via 1c88cc3b00870a93c01688dd5742f5a19e0d0f76 (commit)
via ae79f5fe81a38b64a541adc67194404de5dc8cc5 (commit)
via d185c72f14dab4b4ca10dd01e6ea9b7aeb42b2df (commit)
via 50070c824270d5da1db0b716db73b726d458e9f7 (commit)
via 66ebc54e863f58b86c3ae65ca9f4764906c9a348 (commit)
via 3912ef9b24104abea0e9344ff24deeed700712e3 (commit)
via 21b4324449c7091d36fc3e153d3e0f4ea3515278 (commit)
via 5471e816ab36a6182b2223dea461fc8d086ed9e7 (commit)
via 686ed44b82c009ddb63ed064d46ce44fcade5fbe (commit)
via e108ea6f210bf93250ad4ea23ac3708e1478946e (commit)
via f551799e8c3de59be0a6a7c5168194b93987e876 (commit)
via bb1028fd4f52135f4a2c8175d9bf1b90043df1cc (commit)
via 1f26ac530c0ca072ff0de69093d38c95b9d3c80a (commit)
via 7d85a63f7bd3ef5926b92dd8f7d9c1588cf6e286 (commit)
via a365c21da34b70f50459137ae242767cc336f191 (commit)
via d13509441ce77077ccf21b9442458b0fb52b1c07 (commit)
via 8e00f359e81c3cb03c5075710ead0f87f87e3220 (commit)
via f52ff519388e7f3ab4e903695b731a2a7000fcf5 (commit)
via 582348ce86ac20e0bb92079e5f15ba9b05f60a66 (commit)
via 92bf1032800f3365a5d8eb5052a2a045495ca646 (commit)
via ebc15cde7e0fa14a61127be51267a5ad0c430f90 (commit)
via f63ff922e713a04b3f4391d509c2206ac32edbb5 (commit)
via 72a0beb8dfe85b303f546d09986461886fe7a3d8 (commit)
via aa4405d57bec097972c4d5b60d1cfd6a06f84bf1 (commit)
via 4d17de950b96631d01c7928b9cab24860b2e29e5 (commit)
via 365948a46f61db8726a24bfd0c625d26a014f63a (commit)
via c24553e21fe01121a42e2136d0a1230d75812b27 (commit)
via 151ea34890984f1fb2404df848c1dcbf3e61d765 (commit)
via dfd8332b1a958ed9aeb6ae423ea937b5e08024f8 (commit)
via 54c3708f45c72065cefd4d6013be5467bee65f85 (commit)
via 146c48357b32d26019675834eda1daddde95302c (commit)
via 8cec4587428e4fba8f5cf8791f19f8373212b250 (commit)
via 090c4c5abac33b2b28d7bdcf3039005a014f9c5b (commit)
via 62f912bd96a5fefeb0eb8b017ff12335810483b0 (commit)
via 3b4b066b5d1c3726f51e52fee52c317a3ae3f9e3 (commit)
via a7047de1ec7aece83271cc28605ea1d790afee67 (commit)
via 71eee6e279d7527adbc1e325b0cca49d824b67ee (commit)
via 0958095d36903cd821afc57be0c038896dd1acdb (commit)
via 8e66cc336b29bd5acc1f764f26cb0b116db4dc87 (commit)
via e540aaf2cedae6cfeb4c0ea063f8693cf5999822 (commit)
via 71fb105407d496134f0cfcbea73eaea9991dbcf5 (commit)
via aac974498b0a9513f3caf341e1eecbe4adbcff0a (commit)
via e7cf8992bed2ef0be2843da6f0eedf9fa6d5f66b (commit)
via eea48a1e96605accf8579ae4b7fb869295c9ff99 (commit)
via df79b8d3306394ae123fb4c558f7239146e9f0d6 (commit)
via 6d784213ea929dfa06099d7d85ed87709a7f408e (commit)
via e77575c3c85c7e219137b2c616ad104e5b28eb20 (commit)
via 49f1d2d2e7f75432465ddd4acae2579c018aab33 (commit)
via ed9c17ed1627872d701c76336aff407d3ad5c44e (commit)
via b0e38303e79e2a487e37a9dcadd5f1730cdeae9e (commit)
via 93145c09728dbfb7fe5bd77b5a3671e911c41deb (commit)
via 1c1bd99f0add79535b62f6723d7e942661007653 (commit)
via 1d03e4212cffa7fcf57d0f3a4fcdc1920c959e40 (commit)
via 834d48869745039bbd874d76bcafb4ac6ce7a4e8 (commit)
via cca39b307de50546d7e3c4cd9fe4c2435223bf21 (commit)
via dffeeebd09195ad602090501c8c9b05b55885596 (commit)
via 673a619cd628130b0506a5d3669fd6a4d139c790 (commit)
via f8092952b50ef238e2ffc63ccb6d17a469f22966 (commit)
via 7cb53c7b33c41bc8c5d76c6994caae800692108d (commit)
via d0df4daafee6703a7b52609b5681846f83310182 (commit)
via d23f84732df2786fad5bf31f3446e0e088d941ec (commit)
via 963e72656e6a5d8303034f9085c87834a75c44ce (commit)
via fd2daaa2c1a27140568cf5a4f04baf57682214d2 (commit)
via 78942e3fc11f22f1bdbbd8fdd629691d5c510a55 (commit)
via 8945eccce758dd466ac42c6521a3fc4ada5a9226 (commit)
via f29890eed7bad4aead5e95cfa6aae147287a0b10 (commit)
via 7469b1f920d47306f87aab0e2fa0533903bc61af (commit)
via bf9c46a19ba59fa798236b64521fc6d95f18e076 (commit)
via 2357e7abc8bac23f60d79ca8abe81854b5550eea (commit)
via 4d685db094731fccfa684f5c0b26ebfc1c28ca2c (commit)
via 0dd272381befd9b464365cc7df0bb2d761d0d2e0 (commit)
via 046f1b9556d3b8197c03225843ff96d0d79ae762 (commit)
via 7e0ef7c21ad41f0e3047059fef61ddbefe143444 (commit)
via 7cc84b1bfe00402ea12749c63c7e4d8cef5b2431 (commit)
via b327d9aac9bfd87e175d03421069ae679087dd00 (commit)
via 65bc0fbf12199bee2d16b914a544a69345c37cae (commit)
via 2cd7eb5d2c64c6a54350e6399f07fd4826933bff (commit)
via a2a8094104e32ed8249c2811c94f74b876f78b3d (commit)
via 4f17845a927e33ad9655c3f711177e376bc10e44 (commit)
via 84a16612dd45bcaca490715039b1bec235e0dfef (commit)
via d4dce83017319569f35e617dae47af9041166239 (commit)
via 829edd5488aa90324ddc4036dbaf4f2578be9e76 (commit)
via d81a47d3366b6c6ed14edff69188b60ed3655f28 (commit)
via a29b113e5b418921dffaf9b4cfc562ae887a7960 (commit)
via 5024b68a04ecc7ff1c73299fa986cac740cb3e8b (commit)
via 56b188c6e4e36a28b54cab442677e2fa748f0bae (commit)
via d7d60797272f02e6f3f09b659922c71f2c49ffec (commit)
via 570bbcef51aa6a5bc920faabd850cd6a86c0d421 (commit)
via e090ab50879c15c850df8e8145f01d39dbd6b87b (commit)
via 832cd1c032222fec662f9320e6f564f55b75cc8a (commit)
via 933adf250348caf92c04f5249120333e3e300227 (commit)
via 1658417daeac170c4068fbbc6f4e3762ada0e72c (commit)
via 9ff2f7bc88be85c09d37209bd0feeb96ca256892 (commit)
via 7b2691afaea9ccefa2db073f8a717e003f2ad07e (commit)
via 3b30727c4ae0b4febedb9795752352bf5154730a (commit)
via bf635ee41af43f357b285ab97f04f72b37e8fb64 (commit)
via e8e411dd27068279b58bc3527d1b60878ed19d0b (commit)
via 6c44ca2eaa94224d60ceac2602ee9c6846fabf18 (commit)
via bfbd97a0fa52c122c6d0ab5239524b7be58b62be (commit)
via 0e6771fbedb4081dc867e845b541023a673a1da6 (commit)
via b9bc51b44f59c9e93eaa5a21ae7658a320741e08 (commit)
via b18409a1d6515152d107cd965e25ef58835f9f22 (commit)
via a837df2e0c4858543c0bd2e420f726b89994a7e2 (commit)
via 0cd32b208c9a92e5e773b7874a3f75ee58abd6c0 (commit)
via 616537c54eb13b434294342e1a0df06375134ec0 (commit)
via 0d68ac445710fdb4d9d89ca2055b206c9a06dc94 (commit)
via b5e49faa3340628865ea28a60d3dc36d3e08511d (commit)
via b5bbfb2f868f8f7401018debe275c39fc65a5139 (commit)
via 710095383c263973fffe58b050a4924d5053bd7f (commit)
via dd356abbe83f7c1275eba42ac855977499e71e44 (commit)
via 5e1e54cbee0215374fb712152f7906fff960b334 (commit)
via c05781723ac006e4d193d9181bf46ccec998a5b0 (commit)
via 7203d4203cddbb6bf930586e2f3fba183ca12140 (commit)
via 535a401494dd268de77cccfaba68cacbaa1b2a6e (commit)
via b250ca760fe74c901845861fbc2e7292b4349724 (commit)
via 3ac41521c2a1cbc43e3b6e0979eee46b6c45fa63 (commit)
via f30f07e5b15e118686f5665c0a6dca430a95abba (commit)
via 63918fd91e570dbcc2c06f39c75083bbae6a2303 (commit)
via 5599a9aa3b735b42a4a726b79f0c85a0d38eb5da (commit)
via 469bda3f72a097af3dd1bde56d757d7ea916d996 (commit)
via a0209aebd72ab6ec63941d5881b58a3c689b943f (commit)
via 9dec49291c2ccefab6cf97b9146c292897783c5a (commit)
via 6c9695dac3a16574ff3e7d0d310cff3df6d542f6 (commit)
via 7c7238ca556654cd2a0483dab5e7478fa7956a88 (commit)
via e12a932eadf0b33e26979cfbf387eb6788b97cad (commit)
via d77fde1d390e921740df739699dc03b48777f81a (commit)
via 551a2d7f6ed5744170265ea5bc7b99690b58a6f5 (commit)
via 0529433796c0024e9345edd3c458e22e1aec9043 (commit)
via 0252f1b276eaf8e72d42510546f594b9d0703a58 (commit)
via 026699b978f21466cdd20b09dba3fe0448e0592f (commit)
via 6ec7cbb9976f68a0ca265e72dadfbb867d59581f (commit)
via 8faa21b81fde5c30ca1df72739b9a0dd27005402 (commit)
via 159caa607fc11e4b7c1b5efcbb28d0ebf5e99903 (commit)
via 2bfafa08c054715e6163a91da334e1e4fa780740 (commit)
via 78763269e5388263ad29b02049fa61c62829dbe8 (commit)
via 025f40af1c7877ac0ab84e0ee159806a57285c3b (commit)
via 91e59cdd3ffa76c041f774c4ff61cd865299ab75 (commit)
via 27c6b8cef44f5daaa149ec72e3b7052e516ebc26 (commit)
via 846493567722add45db6e7296d570f8ecf99837e (commit)
via d1f68168ca58af66f09e51ced1a35334fb5fb825 (commit)
via 9336279b31c1a5dd9e50fa37d8178c790c4fdef0 (commit)
via 7819ba75f5c170afa06a5a27b8c64e13ae094b74 (commit)
via cf5ed0e7c52e8a97ec48525ee2181e31aaa4184a (commit)
via db143fb8a98a13414f997892449ca2fbb07a0629 (commit)
via 47286f0bb01c6dbe0e48fc080f931d7b93e22063 (commit)
via 191329567e3cab6ae2f0752f2e70880b8d97271a (commit)
via 4c0d2595196da373ca70a52663b7ec13842c940d (commit)
via 242235d6e7bb4e1893c0ebfc58e7a757dae771f8 (commit)
via baf3d8783ad1bc05bbe4db507325e9bfcd8d9be9 (commit)
via f9045d39a58a9b9287f3ece1022391a3b07e88d3 (commit)
via 525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
via c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
via 0042b37bdc4e3929faf3d2b7862dd79979d60aa0 (commit)
via e39dbc26a1aaecdff6809be620a91d4771e5af9b (commit)
via 88c4d5e241d0dd670eca6f9a4981439a08924704 (commit)
via 2e8473390d5dd2274aedd59ba3934c597f94b04a (commit)
via 05164f9d61006869233b498d248486b4307ea8b6 (commit)
via bfcd0225fe669dde479dde1146612f7c067a817f (commit)
via 85b53414c2c8f70e541447ee204e004693289956 (commit)
via 2c936393a16d79fa3d4bbbdacc66884f7d8d3cb9 (commit)
via 3383b56081364d68de8c29fb34698a7651c50e05 (commit)
via b60e7347e2b97d913b386b82b682c8c7ae2e3d4e (commit)
via e2127bd275b2263f06d7ba039123411c9b7cf07d (commit)
via e3d273a6048b48b7b39d55087d3a6b7ca0a623eb (commit)
via 35d4ca9a46fd8372cce752577944b2fdc458a0f5 (commit)
via ea2a4f906dc3b5bae939a0348a5f82fa690bbec5 (commit)
via 376fd546007d1bf592e391f11b5fdf08993914c2 (commit)
via c0442d5d6e70643e10e639efe1162b64c44cce45 (commit)
via 95e0a157b2ae5a4027c06b7bb1aec04f9eb883fd (commit)
via 4082b7fdb7a1b23518e2dcbb5077f52a79bffa8c (commit)
via 18ba901c91b5bd1e910c7ccc8ae1ebbb1e00fa36 (commit)
via 2f088998525f389391efb86ac4e917174449df85 (commit)
via cdadcd5d6a6bd594fdbcb9efe628067879623df6 (commit)
via 2ab154b25ceb6264f87ba6a3ca139ec44c7db275 (commit)
via faa2bca6751f7a8837e8c593ae723ea81fd40b69 (commit)
via 2e29bef63a7fa200af54b9b0fc69e5cf2573b467 (commit)
via 258663014324e165ea95d581498268915d176141 (commit)
via 4d81613695a03b3d39adb5b54822dc1a07a37af0 (commit)
via af1dbc5024d5b3289841868ee49929ba4f4d3f50 (commit)
via 03d707719016e3d3a6d98b3fb9eb786c90df69ec (commit)
via 803a215c662c5a692c3b057fc7c0bae6c91b3587 (commit)
via 99a63ce0a562d9b26ef1ad68b9426d91e6ec35d7 (commit)
via cf4605bebe7b0266f21376b796d4863aca01f63e (commit)
via 18dcf1d0ec44f4ddf701d5872f6d5e493d3c4fdb (commit)
via ac06e0bbad2fd39f8cc77fac06fc397be14f92c2 (commit)
via a6de7efe8fbf314c5182744d462699283464d9f0 (commit)
via 3bd81bcaed4c9c2ca6c6ed5fab00f350be5c2eef (commit)
via 756a7d1a0816670445219db901364074b79f158a (commit)
via 1a915fef55d9902cb4e0c5d077e9c602101419dc (commit)
via dbe54369eb40d9ba95b8fd77859a243f076b5966 (commit)
via 87fe9bf486e8671d74ed7e6683309a77add03f51 (commit)
via ec45081d781ae19de834b11e000acc35415a8f30 (commit)
via 46e6d4b1702e5c30c8bcd33e7fc73733872bc620 (commit)
via 612a96ab3eea34e232fd97e834599745401b73eb (commit)
via ee8c5f5bee062c8943e955184146d839c05bd2da (commit)
via 9ef0e6eeb1ec8477b1f6867d118d4c599f41c0ae (commit)
via b9755c94e619471f8d9769c7c0d230c1e40b9584 (commit)
via 05c064b4bd3ea51553a34e37099aa1053c141060 (commit)
via 1eb10de8b47aaab24b48cb0e109cf2a3bbc22860 (commit)
via 690a04dcd63dce08a69e648223320e922f82b3d6 (commit)
via 1b11baa7c10783eb9d53c24c7f1deb1c0a424105 (commit)
via e3ef2529a0582f0b146ea7326cf2d52312149cf9 (commit)
via d2dc7a3ef911a5ab83527753f351bc99440d60fe (commit)
via f0445f392c1e2c99acfe9117ad36eef0811bd68b (commit)
via aedaf51b32a4b31d697b18ecb914af3889d13c2c (commit)
via ae5aa5618516d4f894bdf5d2aefed76742069644 (commit)
via d191035ad012bc481ce0a4545f9b6819b897a04e (commit)
via 6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
via a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
via 734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
via dadf18e8c050ad6a5977fa32d563f31de99d3ac7 (commit)
via 3caca9f8debed45019acb731b5ef2f55a3479ee4 (commit)
via 3bbdd1e7d3f89b3a281900c75ceb0830d0cfd7d3 (commit)
via f5ae2264a57664aa6ab307865db72f1f740b80c7 (commit)
via 07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
via 0a836aa297d08b3c375d245f50971cf4cf2760e7 (commit)
via f7af58ec51254d0586ee20ebfae4bd0f8977ed48 (commit)
via b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
via 22f3ad26d4bb70a03858d42122b7a648211911c7 (commit)
via 534acaf92fd8ba43488be7057d7a35623dcab0a9 (commit)
via ebe5d465d2995899aa3f95c944e0d32d09ec2034 (commit)
via 3f599b883384e9f180f12b06d704ef098e948c8e (commit)
via 348387b8fa68c25873b4ee50881738c9c0e83670 (commit)
via cae4ced00386d042535ec9b53b20e9bbc2cdaa20 (commit)
via f8ef5bcb1e8ebc747b32192348faae9fd32fdba9 (commit)
via 666d6e49e1cd46fd293b3fdce239e34588666ed6 (commit)
via 8b21629d234228ff9fbb7a3c5ad5ebeca4b981c1 (commit)
via 4191945aad5aaf0873b15727716d0a988b1c978d (commit)
via 99c025349129904b864806049ea8761940ba0ecc (commit)
via ebc5206327363f747822e7344037d9c2b76b8cd9 (commit)
via 9e72b16bcbfcdc819cbdc437feb10f73b1694107 (commit)
via 4355e75c9f82ea797d9353e82fd4d7c445c9e5c2 (commit)
via dfd7a01376d7b871cf7dfe631f5c96b4b2b7767b (commit)
via ae4e7f10136bd182db6d4801ace410e72574abf2 (commit)
via ca03a1f5156b0a68a2179e287d9af444c64aee91 (commit)
via 688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
via c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
via 0caa1d89d3e60a80ab7517d3691a149093e32be6 (commit)
via e3b0557e225ad3e7a6b7d192b8820666d7b81d0a (commit)
via 27bb28f8bbde1dfc79030b0129a1c0405a8ffc38 (commit)
via 7e41b9c3e2e1ca809ed4ea6de67c843a1a0d7680 (commit)
via 4b83a53a37e3fa53a01ca0a6b4c9f7846a64bc5e (commit)
via 839d23249ae0d1722b51d87195b92cad40e6d78c (commit)
via 4dc1b4bf31b25256bac76baca6e8af71e11cc83a (commit)
via af6b421d426357550e818d6fee79dd559382ae46 (commit)
via b9ad6d4babd3e10f1c13140e53d60181681a5def (commit)
via a91e1274bc7cd044b9e6c254a100a0aff73dcc2b (commit)
via d137040ad98f7203cd440ca8b449a84f048af6fd (commit)
via 440524d524bde6ea17ec64b427e259f3bd08757a (commit)
via cb737e68ceac8238844fe2b8b9bc7feea23b4004 (commit)
via 6b48fd891428674ecf5eaaa083bcf5b843deabc5 (commit)
via a98a90d8c392ed3bd0ab51d644568cf560574112 (commit)
via 421d19914af7d7a6f886c1bce084324ce9407b99 (commit)
via cb8f695c11b2a6e5402ca58fabcc8a17800177ee (commit)
from 54aad8af04350eb3a45a4bd6623681efa2f8d2fb (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 137d1b29b6063f4d1983bde07f6ec5404f67dcee
Merge: 54aad8af04350eb3a45a4bd6623681efa2f8d2fb 85071d50cf5e1a569b447ba00e118db04293475a
Author: Tomek Mrugalski <tomasz at isc.org>
Date: Thu Oct 6 16:11:16 2011 +0200
Merge branch 'master' into trac878
Conflicts:
ChangeLog
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 232 ++-
README | 10 +-
src/bin/stats/tests/http/__init__.py => TODO | 0
configure.ac | 62 +-
doc/Doxyfile | 8 +-
doc/guide/bind10-guide.html | 595 +++-
doc/guide/bind10-guide.xml | 942 +++++-
doc/guide/bind10-messages.html | 2028 +++++++++--
doc/guide/bind10-messages.xml | 3790 +++++++++++++++++---
ext/asio/asio/impl/error_code.ipp | 3 +
src/bin/auth/Makefile.am | 7 +
src/bin/auth/auth.spec.pre.in | 18 +
src/bin/auth/auth_config.cc | 17 +-
src/bin/auth/auth_messages.mes | 13 +-
src/bin/auth/auth_srv.cc | 56 +-
src/bin/auth/auth_srv.h | 26 +-
src/bin/auth/b10-auth.8 | 47 +-
src/bin/auth/b10-auth.xml | 48 +-
src/bin/auth/benchmarks/Makefile.am | 7 +
src/bin/auth/command.cc | 23 +-
src/bin/auth/query.cc | 109 +-
src/bin/auth/query.h | 57 +-
src/bin/auth/statistics.cc | 32 +-
src/bin/auth/statistics.h | 20 +
src/bin/auth/tests/Makefile.am | 8 +
src/bin/auth/tests/auth_srv_unittest.cc | 10 +-
src/bin/auth/tests/command_unittest.cc | 65 +-
src/bin/auth/tests/config_unittest.cc | 46 +-
src/bin/auth/tests/query_unittest.cc | 222 +-
src/bin/auth/tests/statistics_unittest.cc | 74 +-
src/bin/auth/tests/testdata/Makefile.am | 2 +-
src/bin/bind10/Makefile.am | 19 +-
src/bin/bind10/bind10.8 | 16 +-
src/bin/bind10/bind10.py.in | 1047 ------
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bind10_messages.mes | 204 ++
src/bin/bind10/bind10_src.py.in | 1087 ++++++
src/bin/bind10/bob.spec | 11 +
src/bin/bind10/creatorapi.txt | 123 +
src/bin/bind10/run_bind10.sh.in | 6 +-
src/bin/bind10/tests/Makefile.am | 7 +-
src/bin/bind10/tests/bind10_test.py.in | 42 +-
src/bin/bindctl/Makefile.am | 2 +
src/bin/bindctl/bindcmd.py | 26 +-
src/bin/bindctl/bindctl_main.py.in | 19 +-
src/bin/bindctl/run_bindctl.sh.in | 4 +-
src/bin/bindctl/tests/Makefile.am | 4 +-
src/bin/cfgmgr/b10-cfgmgr.py.in | 2 +-
src/bin/cfgmgr/plugins/tests/Makefile.am | 6 +-
src/bin/cfgmgr/tests/Makefile.am | 10 +-
src/bin/cmdctl/Makefile.am | 15 +-
src/bin/cmdctl/cmdctl.py.in | 56 +-
src/bin/cmdctl/cmdctl_messages.mes | 81 +
src/bin/cmdctl/run_b10-cmdctl.sh.in | 10 +-
src/bin/cmdctl/tests/Makefile.am | 4 +-
src/bin/cmdctl/tests/cmdctl_test.py | 6 +-
src/bin/dhcp6/Makefile.am | 1 +
src/bin/dhcp6/tests/Makefile.am | 4 +-
src/bin/dhcp6/tests/dhcp6_test.py | 2 +-
src/bin/host/Makefile.am | 1 +
src/bin/host/b10-host.1 | 4 -
src/bin/host/b10-host.xml | 5 -
src/bin/loadzone/Makefile.am | 1 +
src/bin/loadzone/run_loadzone.sh.in | 4 +-
src/bin/loadzone/tests/correct/Makefile.am | 4 +-
src/bin/loadzone/tests/correct/correct_test.sh.in | 2 +-
src/bin/loadzone/tests/error/Makefile.am | 4 +-
src/bin/loadzone/tests/error/error_test.sh.in | 2 +-
src/bin/msgq/tests/Makefile.am | 4 +-
src/bin/resolver/Makefile.am | 2 +
src/bin/resolver/b10-resolver.8 | 30 +-
src/bin/resolver/b10-resolver.xml | 32 +-
src/bin/resolver/main.cc | 3 +-
src/bin/resolver/resolver.cc | 56 +-
src/bin/resolver/resolver.h | 17 +-
src/bin/resolver/resolver_messages.mes | 245 +-
src/bin/resolver/tests/Makefile.am | 1 +
src/bin/resolver/tests/resolver_config_unittest.cc | 147 +-
src/bin/resolver/tests/resolver_unittest.cc | 4 +-
src/bin/sockcreator/README | 2 +-
src/bin/stats/Makefile.am | 25 +-
src/bin/stats/b10-stats-httpd.8 | 6 +-
src/bin/stats/b10-stats-httpd.xml | 10 +-
src/bin/stats/b10-stats.8 | 103 +-
src/bin/stats/b10-stats.xml | 130 +-
src/bin/stats/stats-httpd-xsl.tpl | 1 +
src/bin/stats/stats-schema.spec | 87 -
src/bin/stats/stats.py.in | 622 ++--
src/bin/stats/stats.spec | 106 +-
src/bin/stats/stats_httpd.py.in | 363 +-
src/bin/stats/stats_httpd_messages.mes | 92 +
src/bin/stats/stats_messages.mes | 76 +
src/bin/stats/tests/Makefile.am | 18 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 785 +++--
src/bin/stats/tests/b10-stats_test.py | 1196 +++----
src/bin/stats/tests/fake_select.py | 43 -
src/bin/stats/tests/fake_socket.py | 70 -
src/bin/stats/tests/fake_time.py | 47 -
src/bin/stats/tests/http/Makefile.am | 6 -
src/bin/stats/tests/http/server.py | 96 -
src/bin/stats/tests/isc/Makefile.am | 8 -
src/bin/stats/tests/isc/cc/Makefile.am | 7 -
src/bin/stats/tests/isc/cc/__init__.py | 1 -
src/bin/stats/tests/isc/cc/session.py | 148 -
src/bin/stats/tests/isc/config/Makefile.am | 7 -
src/bin/stats/tests/isc/config/__init__.py | 1 -
src/bin/stats/tests/isc/config/ccsession.py | 160 -
src/bin/stats/tests/isc/log_messages/Makefile.am | 7 +
src/bin/stats/tests/isc/log_messages/__init__.py | 18 +
.../tests/isc/log_messages/stats_httpd_messages.py | 16 +
.../stats/tests/isc/log_messages/stats_messages.py | 16 +
src/bin/stats/tests/isc/util/Makefile.am | 7 -
src/bin/stats/tests/isc/util/process.py | 21 -
src/bin/stats/tests/test_utils.py | 364 ++
src/bin/stats/tests/testdata/Makefile.am | 1 -
src/bin/stats/tests/testdata/stats_test.spec | 19 -
src/bin/tests/Makefile.am | 8 +-
src/bin/tests/process_rename_test.py.in | 9 +-
src/bin/xfrin/Makefile.am | 15 +-
src/bin/xfrin/b10-xfrin.8 | 11 +-
src/bin/xfrin/b10-xfrin.xml | 7 +-
src/bin/xfrin/tests/Makefile.am | 4 +-
src/bin/xfrin/tests/xfrin_test.py | 14 +-
src/bin/xfrin/xfrin.py.in | 18 +-
src/bin/xfrin/xfrin.spec | 5 +
src/bin/xfrout/Makefile.am | 15 +-
src/bin/xfrout/b10-xfrout.xml | 8 +
src/bin/xfrout/tests/Makefile.am | 10 +-
src/bin/xfrout/tests/xfrout_test.py.in | 328 ++-
src/bin/xfrout/xfrout.py.in | 229 +-
src/bin/xfrout/xfrout.spec.pre.in | 65 +-
src/bin/xfrout/xfrout_messages.mes | 24 +-
src/bin/zonemgr/Makefile.am | 16 +-
src/bin/zonemgr/tests/Makefile.am | 4 +-
src/bin/zonemgr/tests/zonemgr_test.py | 65 +-
src/bin/zonemgr/zonemgr.py.in | 124 +-
src/bin/zonemgr/zonemgr_messages.mes | 145 +
src/cppcheck-suppress.lst | 2 +-
src/lib/Makefile.am | 6 +-
src/lib/acl/Makefile.am | 2 +-
src/lib/acl/acl.h | 3 +
src/lib/acl/dns.cc | 120 +-
src/lib/acl/dns.h | 143 +-
src/lib/acl/dnsname_check.h | 83 +
src/lib/acl/loader.h | 59 +-
src/lib/acl/logic_check.h | 80 +
src/lib/acl/tests/Makefile.am | 3 +
src/lib/acl/tests/dns_test.cc | 256 ++-
src/lib/acl/tests/dnsname_check_unittest.cc | 59 +
src/lib/acl/tests/ip_check_unittest.cc | 31 +-
src/lib/acl/tests/loader_test.cc | 4 +
src/lib/acl/tests/logic_check_test.cc | 46 +
src/lib/acl/tests/sockaddr.h | 69 +
src/lib/asiodns/asiodns_messages.mes | 10 +-
src/lib/asiodns/tests/run_unittests.cc | 4 +-
src/lib/asiolink/README | 7 +
src/lib/asiolink/tests/interval_timer_unittest.cc | 63 +-
src/lib/asiolink/tests/io_endpoint_unittest.cc | 2 +-
src/lib/bench/tests/Makefile.am | 1 +
src/lib/cache/Makefile.am | 11 +-
src/lib/cache/cache_messages.mes | 148 +
src/lib/cache/local_zone_data.cc | 4 +
src/lib/cache/logger.cc | 23 +
src/lib/cache/logger.h | 44 +
src/lib/cache/message_cache.cc | 20 +
src/lib/cache/message_cache.h | 2 +-
src/lib/cache/message_entry.cc | 5 +-
src/lib/cache/resolver_cache.cc | 32 +
src/lib/cache/rrset_cache.cc | 27 +-
src/lib/cache/tests/Makefile.am | 2 +
src/lib/cache/tests/run_unittests.cc | 4 +
src/lib/cc/cc_messages.mes | 6 +-
src/lib/cc/data.cc | 6 +-
src/lib/cc/session.cc | 2 +-
src/lib/cc/tests/data_unittests.cc | 15 +
src/lib/config/ccsession.cc | 129 +-
src/lib/config/ccsession.h | 8 +-
src/lib/config/config_log.h | 8 +
src/lib/config/config_messages.mes | 25 +
src/lib/config/module_spec.cc | 115 +-
src/lib/config/module_spec.h | 23 +-
src/lib/config/tests/ccsession_unittests.cc | 94 +-
src/lib/config/tests/module_spec_unittests.cc | 167 +-
src/lib/config/tests/testdata/Makefile.am | 12 +
src/lib/config/tests/testdata/data32_1.data | 3 +
src/lib/config/tests/testdata/data32_2.data | 3 +
src/lib/config/tests/testdata/data32_3.data | 3 +
src/lib/config/tests/testdata/data33_1.data | 7 +
src/lib/config/tests/testdata/data33_2.data | 7 +
src/lib/config/tests/testdata/spec2.spec | 11 +
src/lib/config/tests/testdata/spec32.spec | 19 +
src/lib/config/tests/testdata/spec33.spec | 50 +
src/lib/config/tests/testdata/spec34.spec | 14 +
src/lib/config/tests/testdata/spec35.spec | 15 +
src/lib/config/tests/testdata/spec36.spec | 17 +
src/lib/config/tests/testdata/spec37.spec | 7 +
src/lib/config/tests/testdata/spec38.spec | 17 +
src/lib/datasrc/Makefile.am | 14 +-
src/lib/datasrc/cache.cc | 3 +-
src/lib/datasrc/client.h | 292 ++
src/lib/datasrc/data_source.cc | 6 +-
src/lib/datasrc/data_source.h | 12 +-
src/lib/datasrc/database.cc | 960 +++++
src/lib/datasrc/database.h | 770 ++++
src/lib/datasrc/datasrc_messages.mes | 259 +-
src/lib/datasrc/factory.cc | 82 +
src/lib/datasrc/factory.h | 182 +
src/lib/datasrc/iterator.h | 61 +
src/lib/datasrc/memory_datasrc.cc | 372 ++-
src/lib/datasrc/memory_datasrc.h | 216 +-
src/lib/datasrc/rbtree.h | 6 +-
src/lib/datasrc/sqlite3_accessor.cc | 779 ++++
src/lib/datasrc/sqlite3_accessor.h | 215 ++
src/lib/datasrc/sqlite3_datasrc.cc | 98 +-
src/lib/datasrc/static_datasrc.cc | 3 +-
src/lib/datasrc/tests/Makefile.am | 26 +-
src/lib/datasrc/tests/cache_unittest.cc | 6 +-
src/lib/datasrc/tests/client_unittest.cc | 50 +
src/lib/datasrc/tests/database_unittest.cc | 2410 +++++++++++++
src/lib/datasrc/tests/factory_unittest.cc | 175 +
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 675 ++--
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 773 ++++
src/lib/datasrc/tests/static_unittest.cc | 1 +
src/lib/datasrc/tests/testdata/Makefile.am | 6 +
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 0 -> 11264 bytes
src/lib/datasrc/tests/zonetable_unittest.cc | 36 +-
src/lib/datasrc/zone.h | 388 ++-
src/lib/datasrc/zonetable.cc | 12 +-
src/lib/datasrc/zonetable.h | 6 +-
src/lib/dns/Makefile.am | 21 +
src/lib/dns/benchmarks/Makefile.am | 1 +
src/lib/dns/character_string.cc | 140 +
src/lib/dns/character_string.h | 57 +
src/lib/dns/gen-rdatacode.py.in | 17 +-
src/lib/dns/message.cc | 92 +-
src/lib/dns/message.h | 66 +-
src/lib/dns/python/Makefile.am | 53 +-
src/lib/dns/python/edns_python.cc | 262 +-
src/lib/dns/python/edns_python.h | 64 +
src/lib/dns/python/message_python.cc | 550 ++--
src/lib/dns/python/message_python.h | 40 +
src/lib/dns/python/message_python_inc.cc | 41 +
src/lib/dns/python/messagerenderer_python.cc | 94 +-
src/lib/dns/python/messagerenderer_python.h | 37 +-
src/lib/dns/python/name_python.cc | 133 +-
src/lib/dns/python/name_python.h | 45 +-
src/lib/dns/python/opcode_python.cc | 231 +-
src/lib/dns/python/opcode_python.h | 64 +
src/lib/dns/python/pydnspp.cc | 716 ++++-
src/lib/dns/python/pydnspp_common.cc | 36 +
src/lib/dns/python/pydnspp_common.h | 2 -
src/lib/dns/python/pydnspp_towire.h | 4 +-
src/lib/dns/python/question_python.cc | 271 +-
src/lib/dns/python/question_python.h | 66 +
src/lib/dns/python/rcode_python.cc | 109 +-
src/lib/dns/python/rcode_python.h | 49 +-
src/lib/dns/python/rdata_python.cc | 289 +-
src/lib/dns/python/rdata_python.h | 68 +
src/lib/dns/python/rrclass_python.cc | 303 +-
src/lib/dns/python/rrclass_python.h | 68 +
src/lib/dns/python/rrset_python.cc | 494 ++--
src/lib/dns/python/rrset_python.h | 78 +
src/lib/dns/python/rrttl_python.cc | 281 +-
src/lib/dns/python/rrttl_python.h | 67 +
src/lib/dns/python/rrtype_python.cc | 348 +-
src/lib/dns/python/rrtype_python.h | 68 +
src/lib/dns/python/tests/Makefile.am | 2 +-
src/lib/dns/python/tests/message_python_test.py | 173 +-
src/lib/dns/python/tests/question_python_test.py | 10 +-
src/lib/dns/python/tsig_python.cc | 105 +-
src/lib/dns/python/tsig_python.h | 28 +-
src/lib/dns/python/tsig_rdata_python.cc | 62 +-
src/lib/dns/python/tsig_rdata_python.h | 29 +-
src/lib/dns/python/tsigerror_python.cc | 105 +-
src/lib/dns/python/tsigerror_python.h | 10 +-
src/lib/dns/python/tsigkey_python.cc | 133 +-
src/lib/dns/python/tsigkey_python.h | 52 +-
src/lib/dns/python/tsigrecord_python.cc | 82 +-
src/lib/dns/python/tsigrecord_python.h | 28 +-
src/lib/dns/question.cc | 9 +
src/lib/dns/question.h | 16 +-
src/lib/dns/rdata/any_255/tsig_250.cc | 127 +-
src/lib/dns/rdata/generic/afsdb_18.cc | 171 +
src/lib/dns/rdata/generic/afsdb_18.h | 74 +
src/lib/dns/rdata/generic/detail/ds_like.h | 225 ++
src/lib/dns/rdata/generic/detail/txt_like.h | 172 +
src/lib/dns/rdata/generic/dlv_32769.cc | 121 +
src/lib/dns/rdata/generic/dlv_32769.h | 77 +
src/lib/dns/rdata/generic/ds_43.cc | 109 +-
src/lib/dns/rdata/generic/ds_43.h | 33 +-
src/lib/dns/rdata/generic/hinfo_13.cc | 129 +
src/lib/dns/rdata/generic/hinfo_13.h | 77 +
src/lib/dns/rdata/generic/minfo_14.cc | 156 +
src/lib/dns/rdata/generic/minfo_14.h | 82 +
src/lib/dns/rdata/generic/naptr_35.cc | 220 ++
src/lib/dns/rdata/generic/naptr_35.h | 83 +
src/lib/dns/rdata/generic/rp_17.cc | 1 +
src/lib/dns/rdata/generic/rrsig_46.cc | 5 +
src/lib/dns/rdata/generic/rrsig_46.h | 3 +
src/lib/dns/rdata/generic/spf_99.cc | 87 +
src/lib/dns/rdata/generic/spf_99.h | 52 +
src/lib/dns/rdata/generic/txt_16.cc | 121 +-
src/lib/dns/rdata/generic/txt_16.h | 11 +-
src/lib/dns/rdata/in_1/dhcid_49.cc | 145 +
src/lib/dns/rdata/in_1/dhcid_49.h | 58 +
src/lib/dns/rdata/in_1/srv_33.cc | 245 ++
src/lib/dns/rdata/in_1/srv_33.h | 93 +
src/lib/dns/rdata/template.cc | 1 +
src/lib/dns/rrtype-placeholder.h | 5 +
src/lib/dns/tests/Makefile.am | 8 +-
src/lib/dns/tests/character_string_unittest.cc | 92 +
src/lib/dns/tests/message_unittest.cc | 355 ++-
src/lib/dns/tests/question_unittest.cc | 16 +
src/lib/dns/tests/rdata_afsdb_unittest.cc | 210 ++
src/lib/dns/tests/rdata_ds_like_unittest.cc | 171 +
src/lib/dns/tests/rdata_ds_unittest.cc | 99 -
src/lib/dns/tests/rdata_hinfo_unittest.cc | 115 +
src/lib/dns/tests/rdata_minfo_unittest.cc | 184 +
src/lib/dns/tests/rdata_naptr_unittest.cc | 178 +
src/lib/dns/tests/rdata_rrsig_unittest.cc | 2 +-
src/lib/dns/tests/rdata_srv_unittest.cc | 173 +
src/lib/dns/tests/testdata/Makefile.am | 36 +-
src/lib/dns/tests/testdata/gen-wiredata.py.in | 612 ----
src/lib/dns/tests/testdata/message_fromWire17.spec | 22 +
src/lib/dns/tests/testdata/message_fromWire18.spec | 23 +
src/lib/dns/tests/testdata/message_fromWire19.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire20.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire21.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire22.spec | 14 +
src/lib/dns/tests/testdata/message_toWire4.spec | 27 +
src/lib/dns/tests/testdata/message_toWire5.spec | 36 +
.../dns/tests/testdata/rdata_afsdb_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_afsdb_fromWire2.spec | 6 +
.../dns/tests/testdata/rdata_afsdb_fromWire3.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire4.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_fromWire5.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire1.spec | 4 +
.../dns/tests/testdata/rdata_afsdb_toWire2.spec | 8 +
.../dns/tests/testdata/rdata_minfo_fromWire1.spec | 3 +
.../dns/tests/testdata/rdata_minfo_fromWire2.spec | 7 +
.../dns/tests/testdata/rdata_minfo_fromWire3.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire4.spec | 6 +
.../dns/tests/testdata/rdata_minfo_fromWire5.spec | 5 +
.../dns/tests/testdata/rdata_minfo_fromWire6.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire1.spec | 5 +
.../dns/tests/testdata/rdata_minfo_toWire2.spec | 6 +
.../testdata/rdata_minfo_toWireUncompressed1.spec | 7 +
.../testdata/rdata_minfo_toWireUncompressed2.spec | 8 +
src/lib/dns/tests/testdata/rdata_srv_fromWire | 36 +
src/lib/dns/tests/tsig_unittest.cc | 74 +-
src/lib/dns/tsig.cc | 103 +-
src/lib/dns/tsig.h | 21 +
src/lib/exceptions/exceptions.h | 12 +
src/lib/log/Makefile.am | 1 +
src/lib/log/README | 115 +-
src/lib/log/compiler/message.cc | 92 +-
src/lib/log/log_formatter.h | 35 +-
src/lib/log/log_messages.cc | 2 +-
src/lib/log/log_messages.h | 2 +-
src/lib/log/log_messages.mes | 32 +-
src/lib/log/logger_support.cc | 74 +-
src/lib/log/logger_support.h | 51 +-
src/lib/log/logger_unittest_support.cc | 175 +
src/lib/log/logger_unittest_support.h | 126 +
src/lib/log/tests/Makefile.am | 17 +-
src/lib/log/tests/console_test.sh.in | 2 -
src/lib/log/tests/destination_test.sh.in | 5 +-
src/lib/log/tests/init_logger_test.cc | 42 +
src/lib/log/tests/init_logger_test.sh.in | 110 +
src/lib/log/tests/local_file_test.sh.in | 2 -
src/lib/log/tests/logger_level_impl_unittest.cc | 7 +-
src/lib/log/tests/logger_level_unittest.cc | 8 +-
src/lib/log/tests/logger_support_unittest.cc | 15 +-
src/lib/log/tests/severity_test.sh.in | 6 +-
src/lib/python/isc/Makefile.am | 3 +-
src/lib/python/isc/__init__.py | 7 +-
src/lib/python/isc/acl/Makefile.am | 45 +
src/lib/python/isc/acl/__init__.py | 11 +
src/lib/python/isc/acl/_dns.py | 29 +
src/lib/python/isc/acl/acl.cc | 80 +
src/lib/python/isc/acl/acl.py | 29 +
src/lib/python/isc/acl/acl_inc.cc | 16 +
src/lib/python/isc/acl/dns.cc | 135 +
src/lib/python/isc/acl/dns.h | 52 +
src/lib/python/isc/acl/dns.py | 73 +
src/lib/python/isc/acl/dns_requestacl_inc.cc | 33 +
src/lib/python/isc/acl/dns_requestacl_python.cc | 184 +
src/lib/python/isc/acl/dns_requestacl_python.h | 53 +
src/lib/python/isc/acl/dns_requestcontext_inc.cc | 33 +
.../python/isc/acl/dns_requestcontext_python.cc | 382 ++
src/lib/python/isc/acl/dns_requestcontext_python.h | 54 +
src/lib/python/isc/acl/dns_requestloader_inc.cc | 87 +
src/lib/python/isc/acl/dns_requestloader_python.cc | 270 ++
src/lib/python/isc/acl/dns_requestloader_python.h | 46 +
src/lib/python/isc/acl/dnsacl_inc.cc | 17 +
src/lib/python/isc/acl/tests/Makefile.am | 30 +
src/lib/python/isc/acl/tests/acl_test.py | 29 +
src/lib/python/isc/acl/tests/dns_test.py | 357 ++
src/lib/python/isc/bind10/Makefile.am | 4 +
.../isc => lib/python/isc/bind10}/__init__.py | 0
src/lib/python/isc/bind10/sockcreator.py | 226 ++
src/lib/python/isc/bind10/tests/Makefile.am | 29 +
.../python/isc/bind10/tests/sockcreator_test.py | 327 ++
src/lib/python/isc/cc/data.py | 18 +-
src/lib/python/isc/cc/message.py | 2 +-
src/lib/python/isc/cc/session.py | 37 +-
src/lib/python/isc/cc/tests/Makefile.am | 4 +-
src/lib/python/isc/cc/tests/message_test.py | 5 +
src/lib/python/isc/cc/tests/session_test.py | 10 +
src/lib/python/isc/config/Makefile.am | 26 +-
src/lib/python/isc/config/ccsession.py | 171 +-
src/lib/python/isc/config/cfgmgr.py | 20 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 7 +
src/lib/python/isc/config/config_data.py | 129 +-
src/lib/python/isc/config/config_messages.mes | 33 +
src/lib/python/isc/config/module_spec.py | 127 +-
src/lib/python/isc/config/tests/Makefile.am | 4 +-
src/lib/python/isc/config/tests/ccsession_test.py | 46 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 22 +
.../python/isc/config/tests/config_data_test.py | 55 +-
.../python/isc/config/tests/module_spec_test.py | 112 +
src/lib/python/isc/datasrc/Makefile.am | 36 +-
src/lib/python/isc/datasrc/__init__.py | 21 +-
src/lib/python/isc/datasrc/client_inc.cc | 157 +
src/lib/python/isc/datasrc/client_python.cc | 264 ++
src/lib/python/isc/datasrc/client_python.h | 35 +
src/lib/python/isc/datasrc/datasrc.cc | 225 ++
src/lib/python/isc/datasrc/datasrc.h | 50 +
src/lib/python/isc/datasrc/finder_inc.cc | 96 +
src/lib/python/isc/datasrc/finder_python.cc | 248 ++
src/lib/python/isc/datasrc/finder_python.h | 36 +
src/lib/python/isc/datasrc/iterator_inc.cc | 34 +
src/lib/python/isc/datasrc/iterator_python.cc | 202 ++
src/lib/python/isc/datasrc/iterator_python.h | 38 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 84 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 10 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 389 ++
.../python/isc/datasrc/tests/sqlite3_ds_test.py | 50 +-
src/lib/python/isc/datasrc/updater_inc.cc | 181 +
src/lib/python/isc/datasrc/updater_python.cc | 318 ++
src/lib/python/isc/datasrc/updater_python.h | 39 +
src/lib/python/isc/dns/Makefile.am | 7 +
src/lib/python/isc/log/Makefile.am | 9 +
src/lib/python/isc/log/log.cc | 30 +-
src/lib/python/isc/log/tests/Makefile.am | 24 +-
src/lib/python/isc/log_messages/Makefile.am | 32 +
src/lib/python/isc/log_messages/README | 68 +
src/lib/python/isc/log_messages/__init__.py | 3 +
src/lib/python/isc/log_messages/bind10_messages.py | 1 +
src/lib/python/isc/log_messages/cfgmgr_messages.py | 1 +
src/lib/python/isc/log_messages/cmdctl_messages.py | 1 +
src/lib/python/isc/log_messages/config_messages.py | 1 +
src/lib/python/isc/log_messages/gen-forwarder.sh | 14 +
.../python/isc/log_messages/libxfrin_messages.py | 1 +
.../python/isc/log_messages/notify_out_messages.py | 1 +
.../isc/log_messages/stats_httpd_messages.py | 1 +
src/lib/python/isc/log_messages/stats_messages.py | 1 +
src/lib/python/isc/log_messages/work/Makefile.am | 12 +
.../python/isc/log_messages/work/__init__.py.in | 3 +
src/lib/python/isc/log_messages/xfrin_messages.py | 1 +
src/lib/python/isc/log_messages/xfrout_messages.py | 1 +
.../python/isc/log_messages/zonemgr_messages.py | 1 +
src/lib/python/isc/net/tests/Makefile.am | 4 +-
src/lib/python/isc/notify/Makefile.am | 14 +-
src/lib/python/isc/notify/notify_out.py | 71 +-
src/lib/python/isc/notify/notify_out_messages.mes | 83 +
src/lib/python/isc/notify/tests/Makefile.am | 4 +-
src/lib/python/isc/notify/tests/notify_out_test.py | 25 +-
src/lib/python/isc/util/tests/Makefile.am | 4 +-
src/lib/python/isc/xfrin/Makefile.am | 23 +
.../isc/util => lib/python/isc/xfrin}/__init__.py | 0
src/lib/python/isc/xfrin/diff.py | 235 ++
src/lib/python/isc/xfrin/libxfrin_messages.mes | 21 +
src/lib/python/isc/xfrin/tests/Makefile.am | 24 +
src/lib/python/isc/xfrin/tests/diff_tests.py | 437 +++
src/lib/resolve/resolve_messages.mes | 14 +-
src/lib/resolve/tests/Makefile.am | 1 +
src/lib/server_common/Makefile.am | 10 +-
src/lib/server_common/client.cc | 7 -
src/lib/server_common/client.h | 11 -
src/lib/server_common/keyring.cc | 4 +
src/lib/server_common/logger.cc | 23 +
src/lib/server_common/logger.h | 44 +
src/lib/server_common/portconfig.cc | 21 +-
src/lib/server_common/server_common_messages.mes | 73 +
src/lib/server_common/tests/Makefile.am | 2 +
src/lib/server_common/tests/client_unittest.cc | 24 -
src/lib/server_common/tests/keyring_test.cc | 3 +-
src/lib/server_common/tests/run_unittests.cc | 3 +
src/lib/testutils/testdata/Makefile.am | 2 +-
src/lib/util/Makefile.am | 2 +-
src/lib/util/filename.cc | 18 +
src/lib/util/filename.h | 12 +
src/lib/util/python/Makefile.am | 1 +
src/lib/util/python/gen_wiredata.py.in | 1232 +++++++
src/lib/util/python/pycppwrapper_util.h | 31 +-
src/lib/util/python/wrapper_template.cc | 8 +-
src/lib/util/python/wrapper_template.h | 6 +-
src/lib/util/strutil.cc | 11 +
src/lib/util/strutil.h | 62 +
src/lib/util/tests/filename_unittest.cc | 52 +
src/lib/util/tests/strutil_unittest.cc | 80 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/cleanall.sh | 5 +-
tools/system_messages.py | 6 +
505 files changed, 40506 insertions(+), 9807 deletions(-)
rename src/bin/stats/tests/http/__init__.py => TODO (100%)
delete mode 100755 src/bin/bind10/bind10.py.in
create mode 100644 src/bin/bind10/bind10_messages.mes
create mode 100755 src/bin/bind10/bind10_src.py.in
create mode 100644 src/bin/bind10/creatorapi.txt
create mode 100644 src/bin/cmdctl/cmdctl_messages.mes
mode change 100644 => 100755 src/bin/loadzone/tests/correct/correct_test.sh.in
mode change 100644 => 100755 src/bin/loadzone/tests/error/error_test.sh.in
delete mode 100644 src/bin/stats/stats-schema.spec
mode change 100644 => 100755 src/bin/stats/stats.py.in
mode change 100755 => 100644 src/bin/stats/stats_httpd.py.in
create mode 100644 src/bin/stats/stats_httpd_messages.mes
create mode 100644 src/bin/stats/stats_messages.mes
delete mode 100644 src/bin/stats/tests/fake_select.py
delete mode 100644 src/bin/stats/tests/fake_socket.py
delete mode 100644 src/bin/stats/tests/fake_time.py
delete mode 100644 src/bin/stats/tests/http/Makefile.am
delete mode 100644 src/bin/stats/tests/http/server.py
delete mode 100644 src/bin/stats/tests/isc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/session.py
delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
create mode 100644 src/bin/stats/tests/isc/log_messages/Makefile.am
create mode 100644 src/bin/stats/tests/isc/log_messages/__init__.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/bin/stats/tests/isc/log_messages/stats_messages.py
delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/util/process.py
create mode 100644 src/bin/stats/tests/test_utils.py
delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
create mode 100644 src/bin/zonemgr/zonemgr_messages.mes
create mode 100644 src/lib/acl/dnsname_check.h
create mode 100644 src/lib/acl/tests/dnsname_check_unittest.cc
create mode 100644 src/lib/acl/tests/sockaddr.h
create mode 100644 src/lib/cache/cache_messages.mes
create mode 100644 src/lib/cache/logger.cc
create mode 100644 src/lib/cache/logger.h
create mode 100644 src/lib/config/tests/testdata/data32_1.data
create mode 100644 src/lib/config/tests/testdata/data32_2.data
create mode 100644 src/lib/config/tests/testdata/data32_3.data
create mode 100644 src/lib/config/tests/testdata/data33_1.data
create mode 100644 src/lib/config/tests/testdata/data33_2.data
create mode 100644 src/lib/config/tests/testdata/spec32.spec
create mode 100644 src/lib/config/tests/testdata/spec33.spec
create mode 100644 src/lib/config/tests/testdata/spec34.spec
create mode 100644 src/lib/config/tests/testdata/spec35.spec
create mode 100644 src/lib/config/tests/testdata/spec36.spec
create mode 100644 src/lib/config/tests/testdata/spec37.spec
create mode 100644 src/lib/config/tests/testdata/spec38.spec
create mode 100644 src/lib/datasrc/client.h
create mode 100644 src/lib/datasrc/database.cc
create mode 100644 src/lib/datasrc/database.h
create mode 100644 src/lib/datasrc/factory.cc
create mode 100644 src/lib/datasrc/factory.h
create mode 100644 src/lib/datasrc/iterator.h
create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
create mode 100644 src/lib/datasrc/sqlite3_accessor.h
create mode 100644 src/lib/datasrc/tests/client_unittest.cc
create mode 100644 src/lib/datasrc/tests/database_unittest.cc
create mode 100644 src/lib/datasrc/tests/factory_unittest.cc
create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
create mode 100644 src/lib/datasrc/tests/testdata/Makefile.am
create mode 100644 src/lib/datasrc/tests/testdata/rwtest.sqlite3
create mode 100644 src/lib/dns/character_string.cc
create mode 100644 src/lib/dns/character_string.h
create mode 100644 src/lib/dns/python/edns_python.h
create mode 100644 src/lib/dns/python/message_python.h
create mode 100644 src/lib/dns/python/message_python_inc.cc
create mode 100644 src/lib/dns/python/opcode_python.h
create mode 100644 src/lib/dns/python/question_python.h
create mode 100644 src/lib/dns/python/rdata_python.h
create mode 100644 src/lib/dns/python/rrclass_python.h
create mode 100644 src/lib/dns/python/rrset_python.h
create mode 100644 src/lib/dns/python/rrttl_python.h
create mode 100644 src/lib/dns/python/rrtype_python.h
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
create mode 100644 src/lib/dns/rdata/generic/detail/ds_like.h
create mode 100644 src/lib/dns/rdata/generic/detail/txt_like.h
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.cc
create mode 100644 src/lib/dns/rdata/generic/dlv_32769.h
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.cc
create mode 100644 src/lib/dns/rdata/generic/hinfo_13.h
create mode 100644 src/lib/dns/rdata/generic/minfo_14.cc
create mode 100644 src/lib/dns/rdata/generic/minfo_14.h
create mode 100644 src/lib/dns/rdata/generic/naptr_35.cc
create mode 100644 src/lib/dns/rdata/generic/naptr_35.h
create mode 100644 src/lib/dns/rdata/generic/spf_99.cc
create mode 100644 src/lib/dns/rdata/generic/spf_99.h
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.cc
create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.h
create mode 100644 src/lib/dns/rdata/in_1/srv_33.cc
create mode 100644 src/lib/dns/rdata/in_1/srv_33.h
create mode 100644 src/lib/dns/tests/character_string_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_ds_like_unittest.cc
delete mode 100644 src/lib/dns/tests/rdata_ds_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_hinfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_minfo_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_naptr_unittest.cc
create mode 100644 src/lib/dns/tests/rdata_srv_unittest.cc
delete mode 100755 src/lib/dns/tests/testdata/gen-wiredata.py.in
create mode 100644 src/lib/dns/tests/testdata/message_fromWire17.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire18.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire19.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire20.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire21.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire22.spec
create mode 100644 src/lib/dns/tests/testdata/message_toWire4.spec
create mode 100644 src/lib/dns/tests/testdata/message_toWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
create mode 100644 src/lib/dns/tests/testdata/rdata_srv_fromWire
create mode 100644 src/lib/log/logger_unittest_support.cc
create mode 100644 src/lib/log/logger_unittest_support.h
create mode 100644 src/lib/log/tests/init_logger_test.cc
create mode 100755 src/lib/log/tests/init_logger_test.sh.in
create mode 100644 src/lib/python/isc/acl/Makefile.am
create mode 100644 src/lib/python/isc/acl/__init__.py
create mode 100644 src/lib/python/isc/acl/_dns.py
create mode 100644 src/lib/python/isc/acl/acl.cc
create mode 100644 src/lib/python/isc/acl/acl.py
create mode 100644 src/lib/python/isc/acl/acl_inc.cc
create mode 100644 src/lib/python/isc/acl/dns.cc
create mode 100644 src/lib/python/isc/acl/dns.h
create mode 100644 src/lib/python/isc/acl/dns.py
create mode 100644 src/lib/python/isc/acl/dns_requestacl_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestacl_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestacl_python.h
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestcontext_python.h
create mode 100644 src/lib/python/isc/acl/dns_requestloader_inc.cc
create mode 100644 src/lib/python/isc/acl/dns_requestloader_python.cc
create mode 100644 src/lib/python/isc/acl/dns_requestloader_python.h
create mode 100644 src/lib/python/isc/acl/dnsacl_inc.cc
create mode 100644 src/lib/python/isc/acl/tests/Makefile.am
create mode 100644 src/lib/python/isc/acl/tests/acl_test.py
create mode 100644 src/lib/python/isc/acl/tests/dns_test.py
create mode 100644 src/lib/python/isc/bind10/Makefile.am
rename src/{bin/stats/tests/isc => lib/python/isc/bind10}/__init__.py (100%)
create mode 100644 src/lib/python/isc/bind10/sockcreator.py
create mode 100644 src/lib/python/isc/bind10/tests/Makefile.am
create mode 100644 src/lib/python/isc/bind10/tests/sockcreator_test.py
create mode 100644 src/lib/python/isc/config/config_messages.mes
create mode 100644 src/lib/python/isc/datasrc/client_inc.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.h
create mode 100644 src/lib/python/isc/datasrc/datasrc.cc
create mode 100644 src/lib/python/isc/datasrc/datasrc.h
create mode 100644 src/lib/python/isc/datasrc/finder_inc.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.h
create mode 100644 src/lib/python/isc/datasrc/iterator_inc.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.h
create mode 100644 src/lib/python/isc/datasrc/tests/datasrc_test.py
create mode 100644 src/lib/python/isc/datasrc/updater_inc.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.h
create mode 100644 src/lib/python/isc/dns/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/README
create mode 100644 src/lib/python/isc/log_messages/__init__.py
create mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
create mode 100644 src/lib/python/isc/log_messages/cfgmgr_messages.py
create mode 100644 src/lib/python/isc/log_messages/cmdctl_messages.py
create mode 100644 src/lib/python/isc/log_messages/config_messages.py
create mode 100755 src/lib/python/isc/log_messages/gen-forwarder.sh
create mode 100644 src/lib/python/isc/log_messages/libxfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/notify_out_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_httpd_messages.py
create mode 100644 src/lib/python/isc/log_messages/stats_messages.py
create mode 100644 src/lib/python/isc/log_messages/work/Makefile.am
create mode 100644 src/lib/python/isc/log_messages/work/__init__.py.in
create mode 100644 src/lib/python/isc/log_messages/xfrin_messages.py
create mode 100644 src/lib/python/isc/log_messages/xfrout_messages.py
create mode 100644 src/lib/python/isc/log_messages/zonemgr_messages.py
create mode 100644 src/lib/python/isc/notify/notify_out_messages.mes
create mode 100644 src/lib/python/isc/xfrin/Makefile.am
rename src/{bin/stats/tests/isc/util => lib/python/isc/xfrin}/__init__.py (100%)
create mode 100644 src/lib/python/isc/xfrin/diff.py
create mode 100644 src/lib/python/isc/xfrin/libxfrin_messages.mes
create mode 100644 src/lib/python/isc/xfrin/tests/Makefile.am
create mode 100644 src/lib/python/isc/xfrin/tests/diff_tests.py
create mode 100644 src/lib/server_common/logger.cc
create mode 100644 src/lib/server_common/logger.h
create mode 100644 src/lib/server_common/server_common_messages.mes
create mode 100644 src/lib/util/python/Makefile.am
create mode 100755 src/lib/util/python/gen_wiredata.py.in
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index d88f759..547192e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-266. [func]* tomek
+293. [func]* tomek
b10-dhcp6: Implemented DHCPv6 echo server. It joins DHCPv6
multicast groups and listens to incoming DHCPv6 client messages.
Received messages are then echoed back to clients. This
@@ -8,6 +8,184 @@
and its address must be specified in interfaces.txt.
(Trac #878, git 3b1a604abf5709bfda7271fa94213f7d823de69d)
+292. [func] dvv
+ Implement the DLV rrtype according to RFC4431.
+ (Trac #1144, git d267c0511a07c41cd92e3b0b9ee9bf693743a7cf)
+
+291. [func] naokikambe
+ Statistics items are specified by each module's spec file.
+ Stats module can read these through the config manager. Stats
+ module and stats httpd report statistics data and statistics
+ schema by each module via both bindctl and HTTP/XML.
+ (Trac #928,#929,#930,#1175, git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
+290. [func] jinmei
+ libdns++/pydnspp: added an option parameter to the "from wire"
+ methods of the Message class. One option is defined,
+ PRESERVE_ORDER, which specifies the parser to handle each RR
+ separately, preserving the order, and constructs RRsets in the
+ message sections so that each RRset contains only one RR.
+ (Trac #1258, git c874cb056e2a5e656165f3c160e1b34ccfe8b302)
+
+289. [func]* jinmei
+ b10-xfrout: ACLs for xfrout can now be configured per zone basis.
+	  A per zone ACL is part of a more general zone configuration. A
+ quick example for configuring an ACL for zone "example.com" that
+ rejects any transfer request for that zone is as follows:
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config add Xfrout/zone_config[0]/transfer_acl
+ > config set Xfrout/zone_config[0]/transfer_acl[0] {"action": "REJECT"}
+ The previous global ACL (query_acl) was renamed to transfer_acl,
+ which now works as the default ACL. Note: backward compatibility
+ is not provided, so an existing configuration using query_acl
+ needs to be updated by hand.
+ Note: the per zone configuration framework is a temporary
+ workaround. It will eventually be redesigned as a system wide
+ configuration.
+ (Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
+
+288. [bug] stephen
+ Fixed problem whereby the order in which component files appeared in
+ rdataclass.cc was system dependent, leading to problems on some
+ systems where data types were used before the header file in which
+ they were declared was included.
+ (Trac #1202, git 4a605525cda67bea8c43ca8b3eae6e6749797450)
+
+287. [bug]* jinmei
+ Python script files for log messages (xxx_messages.py) should have
+ been installed under the "isc" package. This fix itself should
+ be a transparent change without affecting existing configurations
+ or other operational practices, but you may want to clean up the
+	  python files from the common directory (such as "site-packages").
+ (Trac #1101, git 0eb576518f81c3758c7dbaa2522bd8302b1836b3)
+
+286. [func] ocean
+ libdns++: Implement the HINFO rrtype support according to RFC1034,
+ and RFC1035.
+ (Trac #1112, git 12d62d54d33fbb1572a1aa3089b0d547d02924aa)
+
+285. [bug] jelte
+ sqlite3 data source: fixed a race condition on initial startup,
+ when the database has not been initialized yet, and multiple
+ processes are trying to do so, resulting in one of them failing.
+ (Trac #326, git 5de6f9658f745e05361242042afd518b444d7466)
+
+284. [bug] jerry
+ b10-zonemgr: zonemgr will not terminate on empty zones, it will
+ log a warning and try to do zone transfer for them.
+ (Trac #1153, git 0a39659638fc68f60b95b102968d7d0ad75443ea)
+
+283. [bug] zhanglikun
+ Make stats and boss processes wait for answer messages from each
+ other in block mode to avoid orphan answer messages, add an internal
+ command "getstats" to boss process for getting statistics data from
+ boss.
+ (Trac #519, git 67d8e93028e014f644868fede3570abb28e5fb43)
+
+282. [func] ocean
+ libdns++: Implement the NAPTR rrtype according to RFC2915,
+ RFC2168 and RFC3403.
+ (Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
+
+bind10-devel-20110819 released on August 19, 2011
+
+281. [func] jelte
+ Added a new type for configuration data: "named set". This allows for
+ similar configuration as the current "list" type, but with strings
+ instead of indices as identifiers. The intended use is for instance
+ /foo/zones/example.org/bar instead of /foo/zones[2]/bar. Currently
+ this new type is not in use yet.
+ (Trac #926, git 06aeefc4787c82db7f5443651f099c5af47bd4d6)
+
+280. [func] jerry
+ libdns++: Implement the MINFO rrtype according to RFC1035.
+ (Trac #1113, git 7a9a19d6431df02d48a7bc9de44f08d9450d3a37)
+
+279. [func] jerry
+ libdns++: Implement the AFSDB rrtype according to RFC1183.
+ (Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
+278. [doc] jelte
+ Add logging configuration documentation to the guide.
+ (Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
+
+277. [func] jerry
+ libdns++: Implement the SRV rrtype according to RFC2782.
+ (Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
+
+276. [func] stephen
+ Although the top-level loggers are named after the program (e.g.
+ b10-auth, b10-resolver), allow the logger configuration to omit the
+ "b10-" prefix and use just the module name.
+ (Trac #1003, git a01cd4ac5a68a1749593600c0f338620511cae2d)
+
+275. [func] jinmei
+ Added support for TSIG key matching in ACLs. The xfrout ACL can
+ now refer to TSIG key names using the "key" attribute. For
+ example, the following specifies an ACL that allows zone transfer
+ if and only if the request is signed with a TSIG of a key name
+ "key.example":
+ > config set Xfrout/query_acl[0] {"action": "ACCEPT", \
+ "key": "key.example"}
+ (Trac #1104, git 9b2e89cabb6191db86f88ee717f7abc4171fa979)
+
+274. [bug] naokikambe
+ add unittests for functions xml_handler, xsd_handler and xsl_handler
+ respectively to make sure their behaviors are correct, regardless of
+ whether type which xml.etree.ElementTree.tostring() after Python3.2
+ returns is str or byte.
+ (Trac #1021, git 486bf91e0ecc5fbecfe637e1e75ebe373d42509b)
+
+273. [func] vorner
+ It is possible to specify ACL for the xfrout module. It is in the ACL
+ configuration key and has the usual ACL syntax. It currently supports
+ only the source address. Default ACL accepts everything.
+ (Trac #772, git 50070c824270d5da1db0b716db73b726d458e9f7)
+
+272. [func] jinmei
+ libdns++/pydnspp: TSIG signing now handles truncated DNS messages
+ (i.e. with TC bit on) with TSIG correctly.
+	  (Trac #910, git 8e00f359e81c3cb03c5075710ead0f87f87e3220)
+
+271. [func] stephen
+ Default logging for unit tests changed to severity DEBUG (level 99)
+ with the output routed to /dev/null. This can be altered by setting
+ the B10_LOGGER_XXX environment variables.
+ (Trac #1024, git 72a0beb8dfe85b303f546d09986461886fe7a3d8)
+
+270. [func] jinmei
+ Added python bindings for ACLs using the DNS request as the
+ context. They are accessible via the isc.acl.dns module.
+ (Trac #983, git c24553e21fe01121a42e2136d0a1230d75812b27)
+
+269. [bug] y-aharen
+ Modified IntervalTimerTest not to rely on the accuracy of the timer.
+ This fix addresses occasional failure of build tests.
+ (Trac #1016, git 090c4c5abac33b2b28d7bdcf3039005a014f9c5b)
+
+268. [func] stephen
+ Add environment variable to allow redirection of logging output during
+ unit tests.
+ (Trac #1071, git 05164f9d61006869233b498d248486b4307ea8b6)
+
+bind10-devel-20110705 released on July 05, 2011
+
+267. [func] tomek
+ Added a dummy module for DHCP6. This module does not actually
+ do anything at this point, and BIND 10 has no option for
+ starting it yet. It is included as a base for further
+ development.
+ (Trac #990, git 4a590df96a1b1d373e87f1f56edaceccb95f267d)
+
+266. [func] Multiple developers
+ Convert various error messages, debugging and other output
+ to the new logging interface, including for b10-resolver,
+ the resolver library, the CC library, b10-auth, b10-cfgmgr,
+ b10-xfrin, and b10-xfrout. This includes a lot of new
+ documentation describing the new log messages.
+ (Trac #738, #739, #742, #746, #759, #761, #762)
+
265. [func]* jinmei
b10-resolver: Introduced ACL on incoming queries. By default the
resolver accepts queries from ::1 and 127.0.0.1 and rejects all
@@ -62,7 +240,7 @@
Now builds and runs with Python 3.2
(Trac #710, git dae1d2e24f993e1eef9ab429326652f40a006dfb)
-257. [bug] y-aharen
+257. [bug] y-aharen
Fixed a bug an instance of IntervalTimerImpl may be destructed
while deadline_timer is holding the handler. This fix addresses
occasional failure of IntervalTimerTest.destructIntervalTimer.
@@ -71,25 +249,25 @@
256. [bug] jerry
src/bin/xfrin: update xfrin to check TSIG before other part of
incoming message.
- (Trac955, git 261450e93af0b0406178e9ef121f81e721e0855c)
+ (Trac #955, git 261450e93af0b0406178e9ef121f81e721e0855c)
255. [func] zhang likun
src/lib/cache: remove empty code in lib/cache and the corresponding
suppression rule in src/cppcheck-suppress.lst.
- (Trac639, git 4f714bac4547d0a025afd314c309ca5cb603e212)
+ (Trac #639, git 4f714bac4547d0a025afd314c309ca5cb603e212)
254. [bug] jinmei
b10-xfrout: failed to send notifies over IPv6 correctly.
- (Trac964, git 3255c92714737bb461fb67012376788530f16e40)
+ (Trac #964, git 3255c92714737bb461fb67012376788530f16e40)
-253. [func] jelte
+253. [func] jelte
Add configuration options for logging through the virtual module
Logging.
- (Trac 736, git 9fa2a95177265905408c51d13c96e752b14a0824)
+ (Trac #736, git 9fa2a95177265905408c51d13c96e752b14a0824)
-252. [func] stephen
+252. [func] stephen
Add syslog as destination for logging.
- (Trac976, git 31a30f5485859fd3df2839fc309d836e3206546e)
+ (Trac #976, git 31a30f5485859fd3df2839fc309d836e3206546e)
251. [bug]* jinmei
Make sure bindctl private files are non readable to anyone except
@@ -98,38 +276,38 @@
group will have to be adjusted. Also note that this change is
only effective for a fresh install; if these files already exist,
their permissions must be adjusted by hand (if necessary).
- (Trac870, git 461fc3cb6ebabc9f3fa5213749956467a14ebfd4)
+ (Trac #870, git 461fc3cb6ebabc9f3fa5213749956467a14ebfd4)
-250. [bug] ocean
+250. [bug] ocean
src/lib/util/encode, in some conditions, the DecodeNormalizer's
iterator may reach the end() and when later being dereferenced
it will cause crash on some platform.
- (Trac838, git 83e33ec80c0c6485d8b116b13045b3488071770f)
+ (Trac #838, git 83e33ec80c0c6485d8b116b13045b3488071770f)
-249. [func] jerry
+249. [func] jerry
xfrout: add support for TSIG verification.
- (Trac816, git 3b2040e2af2f8139c1c319a2cbc429035d93f217)
+ (Trac #816, git 3b2040e2af2f8139c1c319a2cbc429035d93f217)
-248. [func] stephen
+248. [func] stephen
Add file and stderr as destinations for logging.
- (Trac555, git 38b3546867425bd64dbc5920111a843a3330646b)
+ (Trac #555, git 38b3546867425bd64dbc5920111a843a3330646b)
-247. [func] jelte
+247. [func] jelte
Upstream queries from the resolver now set EDNS0 buffer size.
- (Trac834, git 48e10c2530fe52c9bde6197db07674a851aa0f5d)
+ (Trac #834, git 48e10c2530fe52c9bde6197db07674a851aa0f5d)
-246. [func] stephen
+246. [func] stephen
Implement logging using log4cplus (http://log4cplus.sourceforge.net)
- (Trac899, git 31d3f525dc01638aecae460cb4bc2040c9e4df10)
+ (Trac #899, git 31d3f525dc01638aecae460cb4bc2040c9e4df10)
-245. [func] vorner
+245. [func] vorner
Authoritative server can now sign the answers using TSIG
(configured in tsig_keys/keys, list of strings like
"name:<base64-secret>:sha1-hmac"). It doesn't use them for
ACL yet, only verifies them and signs if the request is signed.
- (Trac875, git fe5e7003544e4e8f18efa7b466a65f336d8c8e4d)
+ (Trac #875, git fe5e7003544e4e8f18efa7b466a65f336d8c8e4d)
-244. [func] stephen
+244. [func] stephen
In unit tests, allow the choice of whether unhandled exceptions are
caught in the unit test program (and details printed) or allowed to
propagate to the default exception handler. See the bind10-dev thread
@@ -139,7 +317,7 @@
243. [func]* feng
 	Add optional hmac algorithm SHA224/384/512.
- (Trac#782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
+ (Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
bind10-devel-20110519 released on May 19, 2011
@@ -186,7 +364,7 @@ bind10-devel-20110519 released on May 19, 2011
stats module and stats-httpd module, and maybe with other
statistical modules in future. "stats.spec" has own configuration
and commands of stats module, if it requires.
- (Trac#719, git a234b20dc6617392deb8a1e00eb0eed0ff353c0a)
+ (Trac #719, git a234b20dc6617392deb8a1e00eb0eed0ff353c0a)
236. [func] jelte
C++ client side of configuration now uses BIND10 logging system.
@@ -229,13 +407,13 @@ bind10-devel-20110519 released on May 19, 2011
instead of '%s,%d', which allows us to cope better with
mismatched placeholders and allows reordering of them in
case of translation.
- (Trac901, git 4903410e45670b30d7283f5d69dc28c2069237d6)
+ (Trac #901, git 4903410e45670b30d7283f5d69dc28c2069237d6)
230. [bug] naokikambe
Removed too repeated verbose messages in two cases of:
- when auth sends statistics data to stats
- when stats receives statistics data from other modules
- (Trac#620, git 0ecb807011196eac01f281d40bc7c9d44565b364)
+ (Trac #620, git 0ecb807011196eac01f281d40bc7c9d44565b364)
229. [doc] jreed
Add manual page for b10-host.
diff --git a/README b/README
index a6509da..4b84a88 100644
--- a/README
+++ b/README
@@ -8,10 +8,10 @@ for serving, maintaining, and developing DNS.
BIND10-devel is new development leading up to the production
BIND 10 release. It contains prototype code and experimental
interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
- https://bind10.isc.org/wiki/Year2Milestones
+ http://bind10.isc.org/wiki/Year3Goals
This release includes the bind10 master process, b10-msgq message
bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -67,8 +67,8 @@ e.g.,
Operating-System specific tips:
- FreeBSD
- You may need to install a python binding for sqlite3 by hand. A
- sample procedure is as follows:
+ You may need to install a python binding for sqlite3 by hand.
+ A sample procedure is as follows:
- add the following to /etc/make.conf
PYTHON_VERSION=3.1
- build and install the python binding from ports, assuming the top
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..e69de29
diff --git a/configure.ac b/configure.ac
index 348708f..193c2ec 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20110519, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20110809, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
AM_INIT_AUTOMAKE
AC_CONFIG_HEADERS([config.h])
@@ -12,6 +12,12 @@ AC_PROG_CXX
# Libtool configuration
#
+
+# libtool cannot handle spaces in paths, so exit early if there is one
+if [ test `echo $PWD | grep -c ' '` != "0" ]; then
+ AC_MSG_ERROR([BIND 10 cannot be built in a directory that contains spaces, because of libtool limitations. Please change the directory name, or use a symbolic link that does not contain spaces.])
+fi
+
# On FreeBSD (and probably some others), clang++ does not meet an autoconf
# assumption in identifying libtool configuration regarding shared library:
# the configure script will execute "$CC -shared $CFLAGS/$CXXFLAGS -v" and
@@ -139,6 +145,26 @@ else
AC_SUBST(pkgpyexecdir)
fi
+# We need to store the default pyexecdir in a separate variable so that
+# we can specify in Makefile.am the install directory of various BIND 10
+# python scripts and loadable modules; in Makefile.am we cannot replace
+# $(pyexecdir) using itself, e.g, this doesn't work:
+# pyexecdir = $(pyexecdir)/isc/some_module
+# The separate variable makes this setup possible as follows:
+# pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/some_module
+PYTHON_SITEPKG_DIR=${pyexecdir}
+AC_SUBST(PYTHON_SITEPKG_DIR)
+
+# This will be commonly used in various Makefile.am's that need to generate
+# python log messages.
+PYTHON_LOGMSGPKG_DIR="\$(top_builddir)/src/lib/python/isc/log_messages"
+AC_SUBST(PYTHON_LOGMSGPKG_DIR)
+
+# This is python package paths commonly used in python tests. See
+# README of log_messages for why it's included.
+COMMON_PYTHON_PATH="\$(abs_top_builddir)/src/lib/python/isc/log_messages:\$(abs_top_srcdir)/src/lib/python:\$(abs_top_builddir)/src/lib/python"
+AC_SUBST(COMMON_PYTHON_PATH)
+
# Check for python development environments
if test -x ${PYTHON}-config; then
PYTHON_INCLUDES=`${PYTHON}-config --includes`
@@ -260,6 +286,8 @@ B10_CXXFLAGS="-Wall -Wextra -Wwrite-strings -Woverloaded-virtual -Wno-sign-compa
case "$host" in
*-solaris*)
MULTITHREADING_FLAG=-pthreads
+ # In Solaris, IN6ADDR_ANY_INIT and IN6ADDR_LOOPBACK_INIT need -Wno-missing-braces
+ B10_CXXFLAGS="$B10_CXXFLAGS -Wno-missing-braces"
;;
*)
MULTITHREADING_FLAG=-pthread
@@ -409,7 +437,7 @@ AC_ARG_WITH([botan],
AC_HELP_STRING([--with-botan=PATH],
[specify exact directory of Botan library]),
[botan_path="$withval"])
-if test "${botan_path}" == "no" ; then
+if test "${botan_path}" = "no" ; then
AC_MSG_ERROR([Need botan for libcryptolink])
fi
if test "${botan_path}" != "yes" ; then
@@ -482,7 +510,7 @@ AC_ARG_WITH([log4cplus],
AC_HELP_STRING([--with-log4cplus=PATH],
[specify exact directory of log4cplus library and headers]),
[log4cplus_path="$withval"])
-if test "${log4cplus_path}" == "no" ; then
+if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
@@ -789,12 +817,6 @@ AC_CONFIG_FILES([Makefile
src/bin/zonemgr/tests/Makefile
src/bin/stats/Makefile
src/bin/stats/tests/Makefile
- src/bin/stats/tests/isc/Makefile
- src/bin/stats/tests/isc/cc/Makefile
- src/bin/stats/tests/isc/config/Makefile
- src/bin/stats/tests/isc/util/Makefile
- src/bin/stats/tests/testdata/Makefile
- src/bin/stats/tests/http/Makefile
src/bin/usermgr/Makefile
src/bin/tests/Makefile
src/lib/Makefile
@@ -809,21 +831,30 @@ AC_CONFIG_FILES([Makefile
src/lib/cc/tests/Makefile
src/lib/python/Makefile
src/lib/python/isc/Makefile
+ src/lib/python/isc/acl/Makefile
+ src/lib/python/isc/acl/tests/Makefile
src/lib/python/isc/util/Makefile
src/lib/python/isc/util/tests/Makefile
src/lib/python/isc/datasrc/Makefile
src/lib/python/isc/datasrc/tests/Makefile
+ src/lib/python/isc/dns/Makefile
src/lib/python/isc/cc/Makefile
src/lib/python/isc/cc/tests/Makefile
src/lib/python/isc/config/Makefile
src/lib/python/isc/config/tests/Makefile
src/lib/python/isc/log/Makefile
src/lib/python/isc/log/tests/Makefile
+ src/lib/python/isc/log_messages/Makefile
+ src/lib/python/isc/log_messages/work/Makefile
src/lib/python/isc/net/Makefile
src/lib/python/isc/net/tests/Makefile
src/lib/python/isc/notify/Makefile
src/lib/python/isc/notify/tests/Makefile
src/lib/python/isc/testutils/Makefile
+ src/lib/python/isc/bind10/Makefile
+ src/lib/python/isc/bind10/tests/Makefile
+ src/lib/python/isc/xfrin/Makefile
+ src/lib/python/isc/xfrin/tests/Makefile
src/lib/config/Makefile
src/lib/config/tests/Makefile
src/lib/config/tests/testdata/Makefile
@@ -839,6 +870,7 @@ AC_CONFIG_FILES([Makefile
src/lib/exceptions/tests/Makefile
src/lib/datasrc/Makefile
src/lib/datasrc/tests/Makefile
+ src/lib/datasrc/tests/testdata/Makefile
src/lib/xfr/Makefile
src/lib/log/Makefile
src/lib/log/compiler/Makefile
@@ -856,6 +888,7 @@ AC_CONFIG_FILES([Makefile
src/lib/util/Makefile
src/lib/util/io/Makefile
src/lib/util/unittests/Makefile
+ src/lib/util/python/Makefile
src/lib/util/pyunittests/Makefile
src/lib/util/tests/Makefile
src/lib/acl/Makefile
@@ -889,7 +922,7 @@ AC_OUTPUT([doc/version.ent
src/bin/zonemgr/run_b10-zonemgr.sh
src/bin/stats/stats.py
src/bin/stats/stats_httpd.py
- src/bin/bind10/bind10.py
+ src/bin/bind10/bind10_src.py
src/bin/bind10/run_bind10.sh
src/bin/bind10/tests/bind10_test.py
src/bin/bindctl/run_bindctl.sh
@@ -913,17 +946,19 @@ AC_OUTPUT([doc/version.ent
src/lib/python/isc/cc/tests/cc_test
src/lib/python/isc/notify/tests/notify_out_test
src/lib/python/isc/log/tests/log_console.py
+ src/lib/python/isc/log_messages/work/__init__.py
src/lib/dns/gen-rdatacode.py
src/lib/python/bind10_config.py
- src/lib/dns/tests/testdata/gen-wiredata.py
src/lib/cc/session_config.h.pre
src/lib/cc/tests/session_unittests_config.h
src/lib/log/tests/console_test.sh
src/lib/log/tests/destination_test.sh
+ src/lib/log/tests/init_logger_test.sh
src/lib/log/tests/local_file_test.sh
src/lib/log/tests/severity_test.sh
src/lib/log/tests/tempdir.h
src/lib/util/python/mkpywrapper.py
+ src/lib/util/python/gen_wiredata.py
src/lib/server_common/tests/data_path.h
tests/system/conf.sh
tests/system/glue/setup.sh
@@ -948,12 +983,13 @@ AC_OUTPUT([doc/version.ent
chmod +x src/bin/msgq/run_msgq.sh
chmod +x src/bin/msgq/tests/msgq_test
chmod +x src/lib/dns/gen-rdatacode.py
- chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
- chmod +x src/lib/log/tests/local_file_test.sh
chmod +x src/lib/log/tests/console_test.sh
chmod +x src/lib/log/tests/destination_test.sh
+ chmod +x src/lib/log/tests/init_logger_test.sh
+ chmod +x src/lib/log/tests/local_file_test.sh
chmod +x src/lib/log/tests/severity_test.sh
chmod +x src/lib/util/python/mkpywrapper.py
+ chmod +x src/lib/util/python/gen_wiredata.py
chmod +x src/lib/python/isc/log/tests/log_console.py
chmod +x tests/system/conf.sh
])
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 8857c16..8be9098 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -568,10 +568,10 @@ WARN_LOGFILE =
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
-INPUT = ../src/lib/cc ../src/lib/config \
- ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
- ../src/bin/auth ../src/bin/resolver ../src/lib/bench \
- ../src/lib/log ../src/lib/asiolink/ ../src/lib/nsas \
+INPUT = ../src/lib/exceptions ../src/lib/cc \
+ ../src/lib/config ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
+ ../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log \
+ ../src/lib/log/compiler ../src/lib/asiolink/ ../src/lib/nsas \
../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
../src/bin/sockcreator/ ../src/lib/util/ \
../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5754cf0..1070a2e 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,24 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
e guide for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the reference guide for BIND 10 version 20110519.
+ This is the reference guide for BIND 10 version 20110809.
The most up-to-date version of this document, along with
- other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+ other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>. </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
/a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><di
v class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229460181">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229460208">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
BIND is the popular implementation of a DNS server, developer
interfaces, and DNS tools.
BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++ and Python
and provides a modular environment for serving and maintaining DNS.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This guide covers the experimental prototype of
- BIND 10 version 20110519.
+ BIND 10 version 20110809.
</p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
BIND 10 provides a EDNS0- and DNSSEC-capable
authoritative DNS server and a caching recursive name server
which also provides forwarding.
- </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+ </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460181"></a>Supported Platforms</h2></div></div></div><p>
BIND 10 builds have been tested on Debian GNU/Linux 5,
Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
Linux 5.3.
@@ -28,13 +28,15 @@
It is planned for BIND 10 to build, install and run on
Windows and standard Unix-type platforms.
- </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+ </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229460208"></a>Required Software</h2></div></div></div><p>
BIND 10 requires Python 3.1. Later versions may work, but Python
3.1 is the minimum version which will work.
</p><p>
BIND 10 uses the Botan crypto library for C++. It requires
- at least Botan version 1.8. To build BIND 10, install the
- Botan libraries and development include headers.
+ at least Botan version 1.8.
+ </p><p>
+ BIND 10 uses the log4cplus C++ logging library. It requires
+ at least log4cplus version 1.0.3.
</p><p>
The authoritative server requires SQLite 3.3.9 or newer.
The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -136,7 +138,10 @@
and, of course, DNS. These include detailed developer
documentation and code examples.
- </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229445988">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229446178">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229446197">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229446258">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229446356">Build</a></span></dt><dt><span class="section"><a href="#id1168229446371">Install</a></span></dt><dt><span class="section"><a href="#id1168229446394">Install Hierarchy<
/a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229445988"></a>Building Requirements</h2></div></div></div><p>
+ In addition to the run-time requirements, building BIND 10
+ from source code requires various development include headers.
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Some operating systems have split their distribution packages into
a run-time and a development package. You will need to install
the development package versions, which include header files and
@@ -147,6 +152,11 @@
</p><p>
+ To build BIND 10, also install the Botan (at least version
+ 1.8) and the log4cplus (at least version 1.0.3)
+ development include headers.
+ </p><p>
+
The Python Library and Python _sqlite3 module are required to
enable the Xfrout and Xfrin support.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -156,7 +166,7 @@
Building BIND 10 also requires a C++ compiler and
standard development headers, make, and pkg-config.
BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
- 4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+ 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
</p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
This quickly covers the standard steps for installing
and deploying BIND 10 as an authoritative name server using
@@ -192,14 +202,14 @@
the Git code revision control system or as a downloadable
tar file. It may also be available in pre-compiled ready-to-use
packages from operating system vendors.
- </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
+ </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446178"></a>Download Tar File</h3></div></div></div><p>
Downloading a release tar file is the recommended method to
obtain the source code.
</p><p>
The BIND 10 releases are available as tar file downloads from
<a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
Periodic development snapshots may also be available.
- </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
+ </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446197"></a>Retrieve from Git</h3></div></div></div><p>
Downloading this "bleeding edge" code is recommended only for
developers or advanced users. Using development code in a production
environment is not recommended.
@@ -233,7 +243,7 @@
<span class="command"><strong>autoheader</strong></span>,
<span class="command"><strong>automake</strong></span>,
and related commands.
- </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
+ </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446258"></a>Configure before the build</h3></div></div></div><p>
BIND 10 uses the GNU Build System to discover build environment
details.
To generate the makefiles using the defaults, simply run:
@@ -242,7 +252,7 @@
Run <span class="command"><strong>./configure</strong></span> with the <code class="option">--help</code>
switch to view the different options. The commonly-used options are:
- </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the the installation location (the
+ </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the installation location (the
default is <code class="filename">/usr/local/</code>).
</dd><dt><span class="term">--with-boost-include</span></dt><dd>Define the path to find the Boost headers.
</dd><dt><span class="term">--with-pythonpath</span></dt><dd>Define the path to Python 3.1 if it is not in the
@@ -264,16 +274,16 @@
</p><p>
If the configure fails, it may be due to missing or old
dependencies.
- </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
+ </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446356"></a>Build</h3></div></div></div><p>
After the configure step is complete, to build the executables
from the C++ code and prepare the Python scripts, run:
</p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
- </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
+ </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446371"></a>Install</h3></div></div></div><p>
To install the BIND 10 executables, support files,
and documentation, run:
</p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
- </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229446394"></a>Install Hierarchy</h3></div></div></div><p>
The following is the layout of the complete BIND 10 installation:
</p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
<code class="filename">bin/</code> —
@@ -304,14 +314,14 @@
data source and configuration databases.
</li></ul></div><p>
</p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
- BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
+ BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
starts up the required processes.
<span class="command"><strong>bind10</strong></span>
will also restart processes that exit unexpectedly.
This is the only command needed to start the BIND 10 system.
</p><p>
After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
- <span class="command"><strong>bind10</strong></span> connects to it,
+ <span class="command"><strong>bind10</strong></span> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</p><p>
@@ -334,7 +344,12 @@
To start the BIND 10 service, simply run <span class="command"><strong>bind10</strong></span>.
Run it with the <code class="option">--verbose</code> switch to
get additional debugging or diagnostic output.
- </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+ </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
+ This is not needed on some operating systems.
+ </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
message routing daemon to communicate with other BIND 10 components.
The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
@@ -490,12 +505,12 @@ shutdown
the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
channel) the configuration on to the specified module.
</p><p>
- </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
+ </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229446979">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229447044">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229447074">Loading Master Zones Files</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
It supports EDNS0 and DNSSEC. It supports IPv6.
Normally it is started by the <span class="command"><strong>bind10</strong></span> master
process.
- </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
+ </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229446979"></a>Server Configurations</h2></div></div></div><p>
<span class="command"><strong>b10-auth</strong></span> is configured via the
<span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -515,7 +530,7 @@ This may be a temporary setting until then.
</p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
</dd></dl></div><p>
- </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+ </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447044"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
supports a SQLite3 data source backend and in-memory data source
backend.
@@ -529,7 +544,7 @@ This may be a temporary setting until then.
The default is <code class="filename">/usr/local/var/</code>.)
This data file location may be changed by defining the
<span class="quote">“<span class="quote">database_file</span>”</span> configuration.
- </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
+ </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447074"></a>Loading Master Zones Files</h2></div></div></div><p>
RFC 1035 style DNS master zone files may be imported
into a BIND 10 data source by using the
<span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -569,7 +584,7 @@ This may be a temporary setting until then.
provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
@@ -591,7 +606,7 @@ This may be a temporary setting until then.
NOTIFY messages to slaves.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
@@ -607,13 +622,13 @@ This may be a temporary setting until then.
</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
Access control (such as allowing notifies) is not yet provided.
The primary/secondary service is not yet complete.
- </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
+ </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447556">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229447671">Forwarding</a></span></dt></dl></div><p>
The <span class="command"><strong>b10-resolver</strong></span> process is started by
<span class="command"><strong>bind10</strong></span>.
</p><p>
The main <span class="command"><strong>bind10</strong></span> process can be configured
- to select to run either the authoritative or resolver.
+ to select to run either the authoritative or resolver or both.
By default, it starts the authoritative service.
@@ -629,14 +644,52 @@ This may be a temporary setting until then.
The master <span class="command"><strong>bind10</strong></span> will stop and start
the desired services.
</p><p>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
</p><pre class="screen">
-> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
+> <strong class="userinput"><code>config add Resolver/listen_on</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/address "192.168.1.1"</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/port 53</code></strong>
> <strong class="userinput"><code>config commit</code></strong>
</pre><p>
- </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
+ </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447556"></a>Access Control</h2></div></div></div><p>
+ By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <code class="option">Resolver/query_acl</code> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is evaluated on a first-match basis.
+ </p><p>
+ The configuration's <code class="option">action</code> item may be
+ set to <span class="quote">“<span class="quote">ACCEPT</span>”</span> to allow the incoming query,
+ <span class="quote">“<span class="quote">REJECT</span>”</span> to respond with a DNS REFUSED return
+ code, or <span class="quote">“<span class="quote">DROP</span>”</span> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_ACCEPTED" target="_top">RESOLVER_QUERY_ACCEPTED</a>,
+ <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_REJECTED" target="_top">RESOLVER_QUERY_REJECTED</a>,
+ and <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_DROPPED" target="_top">RESOLVER_QUERY_DROPPED</a>.
+ </p><p>
+ The required configuration's <code class="option">from</code> item is set
+ to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <span class="quote">“<span class="quote">any6</span>”</span> (for
+ any IPv6 address) or <span class="quote">“<span class="quote">any4</span>”</span> (for any IPv4
+ address).
+ </p><p>
+ For example, to allow the <em class="replaceable"><code>192.168.1.0/24</code></em>
+ network to use your recursive name server, at the
+ <span class="command"><strong>bindctl</strong></span> prompt run:
+ </p><pre class="screen">
+> <strong class="userinput"><code>config add Resolver/query_acl</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/action "ACCEPT"</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/from "<em class="replaceable"><code>192.168.1.0/24</code></em>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+ as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+ Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
+ syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447671"></a>Forwarding</h2></div></div></div><p>
To enable forwarding, the upstream address and port must be
configured to forward queries to, such as:
@@ -664,68 +717,440 @@ This may be a temporary setting until then.
</p><p>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <span class="command"><strong>bindctl</strong></span>:
</p><pre class="screen">
> <strong class="userinput"><code>Stats show</code></strong>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</pre><p>
- </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </p><p>
- Consider the message below logged to a file:
- </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447788"></a>Logging configuration</h2></div></div></div><p>
+
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
+
+
+
+ </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229447799"></a>Loggers</h3></div></div></div><p>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </p><p>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+ take appropriate default values.
+
+ </p><p>
+
+ The three most important elements of a logger configuration
+ are the <code class="option">name</code> (the component that is
+ generating the messages), the <code class="option">severity</code>
+ (what to log), and the <code class="option">output_options</code>
+ (where to log).
+
+ </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447824"></a>name (string)</h4></div></div></div><p>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <span class="quote">“<span class="quote">Resolver</span>”</span>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </p><p>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <em class="replaceable"><code>module.library</code></em>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <span class="quote">“<span class="quote">Resolver.nsas</span>”</span>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+
+
+ </p><p>
+
+
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <span class="quote">“<span class="quote">Resolver</span>”</span> and severity INFO, and one with
+ the name <span class="quote">“<span class="quote">Resolver.cache</span>”</span> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<span class="quote">“<span class="quote">Resolver</span>”</span>), so giving the desired behavior.
+
+ </p><p>
+
+ One special case is that of a module name of <span class="quote">“<span class="quote">*</span>”</span>
+ (asterisks), which is interpreted as <span class="emphasis"><em>any</em></span>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <span class="quote">“<span class="quote">*.config</span>”</span>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </p><p>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+ precedence. For example, if there are entries for
+ both <span class="quote">“<span class="quote">*</span>”</span> and <span class="quote">“<span class="quote">Resolver</span>”</span>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<span class="quote">“<span class="quote">Resolver</span>”</span>). All other modules
+ will use the configuration of the first entry
+ (<span class="quote">“<span class="quote">*</span>”</span>). If there was also a configuration
+ entry for <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, the cache library
+ within the resolver would use that in preference to the
+ entry for <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+ </p><p>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <span class="command"><strong>bindctl</strong></span>, e.g.
+ <span class="quote">“<span class="quote">Resolver</span>”</span> for the resolver module,
+ <span class="quote">“<span class="quote">Xfrout</span>”</span> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
+ with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
+
+ </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447923"></a>severity (string)</h4></div></div></div><p>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> FATAL </li><li class="listitem"> ERROR </li><li class="listitem"> WARN </li><li class="listitem"> INFO </li><li class="listitem"> DEBUG </li></ul></div><p>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+
+
+ </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447973"></a>output_options (list)</h4></div></div></div><p>
+
+ Each logger can have zero or more
+ <code class="option">output_options</code>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </p><p>
+
+ The other options for a logger are:
+
+ </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229447990"></a>debuglevel (integer)</h4></div></div></div><p>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </p><p>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448005"></a>additive (true or false)</h4></div></div></div><p>
+
+ If this is true, the <code class="option">output_options</code> from
+ the parent will be used. For example, if there are two
+ loggers configured; <span class="quote">“<span class="quote">Resolver</span>”</span> and
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, and <code class="option">additive</code>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, but also to the destinations
+ as specified in the <code class="option">output_options</code> in
+ the logger named <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+
+
+ </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448040"></a>Output Options</h3></div></div></div><p>
+
+ The main settings for an output option are the
+ <code class="option">destination</code> and a value called
+ <code class="option">output</code>, the meaning of which depends on
+ the destination that is set.
+
+ </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448056"></a>destination (string)</h4></div></div></div><p>
+
+ The destination is the type of output. It can be one of:
+
+ </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229448088"></a>output (string)</h4></div></div></div><p>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </p><div class="variablelist"><dl><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">console</span>”</span></span></dt><dd>
+ The value of output must be one of <span class="quote">“<span class="quote">stdout</span>”</span>
+ (messages printed to standard output) or
+ <span class="quote">“<span class="quote">stderr</span>”</span> (messages printed to standard
+ error).
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span></span></dt><dd>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">syslog</span>”</span></span></dt><dd>
+ The value of output is interpreted as the
+ <span class="command"><strong>syslog</strong></span> facility (e.g.
+ <span class="emphasis"><em>local0</em></span>) that should be used
+ for log messages.
+ </dd></dl></div><p>
+
+ The other options for <code class="option">output_options</code> are:
+
+ </p><div class="section" title="flush (true or false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448172"></a>flush (true or false)</h5></div></div></div><p>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448182"></a>maxsize (integer)</h5></div></div></div><p>
+ Only relevant when destination is file, this is the maximum
+ file size of output files in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </p><p>
+ If this is 0, no maximum file size is used.
+ </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229448196"></a>maxver (integer)</h5></div></div></div><p>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
+ </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229448215"></a>Example session</h3></div></div></div><p>
+
+ In this example we want to set the global logging to
+ write to the file <code class="filename">/var/log/my_bind10.log</code>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<code class="filename">/tmp/debug_messages</code>).
+
+ </p><p>
+
+ Start <span class="command"><strong>bindctl</strong></span>.
+
+ </p><p>
+
+ </p><pre class="screen">["login success "]
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers [] list
+</pre><p>
+
+ </p><p>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </p><p>
+
+ Let's first add a default logger:
+
+ </p><p>
+
+ </p><pre class="screen"><strong class="userinput"><code>> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers/ list (modified)
+</pre><p>
+
+ </p><p>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ The name is mandatory, so we must set it. We will also
+ change the severity as well. Let's start with the global
+ logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code>config set Logging/loggers[0]/name *</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity WARN</code></strong>
+> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</pre><p>
+
+ </p><p>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers[0]/output_options</code></strong>
+> <strong class="userinput"><code> config show Logging/loggers[0]/output_options</code></strong>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</pre><p>
+
+
+ </p><p>
+
+ These aren't the values we are looking for.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxsize 30000</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxver 8</code></strong>
+</pre><p>
+
+ </p><p>
+
+ Which would make the entire configuration for this logger
+ look like:
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config show all Logging/loggers</code></strong>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</pre><p>
+
+ </p><p>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config commit</code></strong></pre><p>
+
+ </p><p>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/name Auth</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/debuglevel 40</code></strong>
+> <strong class="userinput"><code> config add Logging/loggers[1]/output_options</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </p><p>
+
+ </p><pre class="screen">> <strong class="userinput"><code> config remove Logging/loggers[1]</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+ </p><p>
+
+ And every module will now be using the values from the
+ logger named <span class="quote">“<span class="quote">*</span>”</span>.
+
+ </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229448428"></a>Logging Message Format</h2></div></div></div><p>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </p><p>
+ Consider the message below logged to a file:
+ </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
- </p><p>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </p><p>
- The log message comprises a number of components:
-
- </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
- The date and time at which the message was generated.
- </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
- The severity of the message.
- </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <span class="command"><strong>b10-resolver</strong></span>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+ </p><p>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.
+ </p><p>
+ The log message comprises a number of components:
+
+ </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+ The date and time at which the message was generated.
+ </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+ The severity of the message.
+ </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
- </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </p></dd></dl></div><p>
-
- </p></div></div></body></html>
+ </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </p></dd></dl></div><p>
+ </p></div></div></div></body></html>
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 7d1a006..00ffee6 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -5,6 +5,23 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+
+<!--
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -129,7 +146,7 @@
The processes started by the <command>bind10</command>
command have names starting with "b10-", including:
</para>
-
+
<para>
<itemizedlist>
@@ -224,7 +241,7 @@
<section id="managing_once_running">
<title>Managing BIND 10</title>
-
+
<para>
Once BIND 10 is running, a few commands are used to interact
directly with the system:
@@ -263,7 +280,7 @@
<!-- TODO point to these -->
In addition, manual pages are also provided in the default installation.
</para>
-
+
<!--
bin/
bindctl*
@@ -370,7 +387,7 @@ Debian and Ubuntu:
</para>
<orderedlist>
-
+
<listitem>
<simpara>
Install required build dependencies.
@@ -454,7 +471,7 @@ Debian and Ubuntu:
Downloading a release tar file is the recommended method to
obtain the source code.
</para>
-
+
<para>
The BIND 10 releases are available as tar file downloads from
<ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -530,37 +547,37 @@ Debian and Ubuntu:
<varlistentry>
<term>--prefix</term>
<listitem>
- <simpara>Define the the installation location (the
+ <simpara>Define the installation location (the
default is <filename>/usr/local/</filename>).
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-boost-include</term>
- <listitem>
+ <listitem>
<simpara>Define the path to find the Boost headers.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-pythonpath</term>
- <listitem>
+ <listitem>
<simpara>Define the path to Python 3.1 if it is not in the
standard execution path.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
<varlistentry>
<term>--with-gtest</term>
- <listitem>
+ <listitem>
<simpara>Enable building the C++ Unit Tests using the
Google Tests framework. Optionally this can define the
path to the gtest header files and library.
</simpara>
- </listitem>
+ </listitem>
</varlistentry>
</variablelist>
@@ -679,13 +696,13 @@ Debian and Ubuntu:
</para>
</section>
-->
-
+
</chapter>
<chapter id="bind10">
<title>Starting BIND10 with <command>bind10</command></title>
<para>
- BIND 10 provides the <command>bind10</command> command which
+ BIND 10 provides the <command>bind10</command> command which
starts up the required processes.
<command>bind10</command>
will also restart processes that exit unexpectedly.
@@ -694,7 +711,7 @@ Debian and Ubuntu:
<para>
After starting the <command>b10-msgq</command> communications channel,
- <command>bind10</command> connects to it,
+ <command>bind10</command> connects to it,
runs the configuration manager, and reads its own configuration.
Then it starts the other modules.
</para>
@@ -725,6 +742,16 @@ Debian and Ubuntu:
get additional debugging or diagnostic output.
</para>
<!-- TODO: note it doesn't go into background -->
+
+ <note>
+ <para>
+ If the setproctitle Python module is detected at start up,
+ the process names for the Python-based daemons will be renamed
+ to better identify them instead of just <quote>python</quote>.
+ This is not needed on some operating systems.
+ </para>
+ </note>
+
</section>
</chapter>
@@ -752,7 +779,7 @@ Debian and Ubuntu:
<command>b10-msgq</command> service.
It listens on 127.0.0.1.
</para>
-
+
<!-- TODO: this is broken, see Trac #111
<para>
To select an alternate port for the <command>b10-msgq</command> to
@@ -1078,10 +1105,10 @@ since we used bind10 -->
The configuration data item is:
<variablelist>
-
+
<varlistentry>
<term>database_file</term>
- <listitem>
+ <listitem>
<simpara>This is an optional string to define the path to find
the SQLite3 database file.
<!-- TODO: -->
@@ -1103,7 +1130,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>shutdown</term>
- <listitem>
+ <listitem>
<simpara>Stop the authoritative DNS server.
</simpara>
<!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1159,7 +1186,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$INCLUDE</term>
- <listitem>
+ <listitem>
<simpara>Loads an additional zone file. This may be recursive.
</simpara>
</listitem>
@@ -1167,7 +1194,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$ORIGIN</term>
- <listitem>
+ <listitem>
<simpara>Defines the relative domain name.
</simpara>
</listitem>
@@ -1175,7 +1202,7 @@ This may be a temporary setting until then.
<varlistentry>
<term>$TTL</term>
- <listitem>
+ <listitem>
<simpara>Defines the time-to-live value used for following
records that don't include a TTL.
</simpara>
@@ -1240,7 +1267,7 @@ TODO
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
<!-- TODO: sqlite3 data source only? -->
@@ -1287,7 +1314,7 @@ what if a NOTIFY is sent?
<note><simpara>
The current development release of BIND 10 only supports
- AXFR. (IXFR is not supported.)
+ AXFR. (IXFR is not supported.)
Access control is not yet provided.
</simpara></note>
@@ -1343,7 +1370,7 @@ what is XfroutClient xfr_client??
<para>
The main <command>bind10</command> process can be configured
- to select to run either the authoritative or resolver.
+ to select to run either the authoritative or resolver or both.
By default, it starts the authoritative service.
<!-- TODO: later both -->
@@ -1363,16 +1390,85 @@ what is XfroutClient xfr_client??
</para>
<para>
- The resolver also needs to be configured to listen on an address
- and port:
+ By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+ The following example shows how it can be configured to
+ listen on an additional address (and port):
<screen>
-> <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+> <userinput>config add Resolver/listen_on</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/address "192.168.1.1"</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/port 53</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
-<!-- TODO: later the above will have some defaults -->
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/listen_on</userinput></quote> if needed.)</simpara>
+<!-- TODO: this example should not include the port, ticket #1185 -->
+
+ <section>
+ <title>Access Control</title>
+
+ <para>
+ By default, the <command>b10-resolver</command> daemon only accepts
+ DNS queries from the localhost (127.0.0.1 and ::1).
+ The <option>Resolver/query_acl</option> configuration may
+ be used to reject, drop, or allow specific IPs or networks.
+ This configuration list is first match.
+ </para>
+
+ <para>
+ The configuration's <option>action</option> item may be
+ set to <quote>ACCEPT</quote> to allow the incoming query,
+ <quote>REJECT</quote> to respond with a DNS REFUSED return
+ code, or <quote>DROP</quote> to ignore the query without
+ any response (such as a blackhole). For more information,
+ see the respective debugging messages: <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+ <ulink
+ url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+ and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+ </para>
+
+ <para>
+ The required configuration's <option>from</option> item is set
+ to an IPv4 or IPv6 address, addresses with a network mask, or to
+ the special lowercase keywords <quote>any6</quote> (for
+ any IPv6 address) or <quote>any4</quote> (for any IPv4
+ address).
+ </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+
+TODO: tsig
+-->
+
+ <para>
+ For example, to allow the <replaceable>192.168.1.0/24</replaceable>
+ network to use your recursive name server, at the
+ <command>bindctl</command> prompt run:
+ </para>
+
+ <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+ <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+ as needed; run <quote><userinput>config show
+ Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+ <note><simpara>This prototype access control configuration
+ syntax may be changed.</simpara></note>
+
+ </section>
<section>
<title>Forwarding</title>
@@ -1426,24 +1522,30 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<para>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show specified
+ or all statistics data schema, and set specified statistics
+ data.
For example, using <command>bindctl</command>:
<screen>
> <userinput>Stats show</userinput>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</screen>
</para>
@@ -1453,61 +1555,679 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<chapter id="logging">
<title>Logging</title>
-<!-- TODO: how to configure logging, logging destinations etc. -->
+ <section>
+ <title>Logging configuration</title>
- <para>
- Each message written by BIND 10 to the configured logging destinations
- comprises a number of components that identify the origin of the
- message and, if the message indicates a problem, information about the
- problem that may be useful in fixing it.
- </para>
+ <para>
- <para>
- Consider the message below logged to a file:
- <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
- ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
- </para>
+ The logging system in BIND 10 is configured through the
+ Logging module. All BIND 10 modules will look at the
+ configuration in Logging to see what should be logged and
+ to where.
- <para>
- Note: the layout of messages written to the system logging
- file (syslog) may be slightly different. This message has
- been split across two lines here for display reasons; in the
- logging file, it will appear on one line.)
- </para>
+<!-- TODO: what is context of Logging module for readers of this guide? -->
- <para>
- The log message comprises a number of components:
+ </para>
+
+ <section>
+ <title>Loggers</title>
+
+ <para>
+
+ Within BIND 10, a message is logged through a component
+ called a "logger". Different parts of BIND 10 log messages
+ through different loggers, and each logger can be configured
+ independently of one another.
+
+ </para>
+
+ <para>
+
+ In the Logging module, you can specify the configuration
+ for zero or more loggers; any that are not specified will
+ take appropriate default values.
+
+ </para>
+
+ <para>
+
+ The three most important elements of a logger configuration
+ are the <option>name</option> (the component that is
+ generating the messages), the <option>severity</option>
+ (what to log), and the <option>output_options</option>
+ (where to log).
+
+ </para>
+
+ <section>
+ <title>name (string)</title>
+
+ <para>
+ Each logger in the system has a name, the name being that
+ of the component using it to log messages. For instance,
+ if you want to configure logging for the resolver module,
+ you add an entry for a logger named <quote>Resolver</quote>. This
+ configuration will then be used by the loggers in the
+ Resolver module, and all the libraries used by it.
+ </para>
+
+<!-- TODO: later we will have a way to know names of all modules
+
+Right now you can only see what their names are if they are running
+(a simple 'help' without anything else in bindctl for instance).
+
+ -->
+
+ <para>
+
+ If you want to specify logging for one specific library
+ within the module, you set the name to
+ <replaceable>module.library</replaceable>. For example, the
+ logger used by the nameserver address store component
+ has the full name of <quote>Resolver.nsas</quote>. If
+ there is no entry in Logging for a particular library,
+ it will use the configuration given for the module.
+
+<!-- TODO: how to know these specific names?
+
+We will either have to document them or tell the administrator to
+specify module-wide logging and see what appears...
+
+-->
+
+ </para>
+
+ <para>
+
+<!-- TODO: severity has not been covered yet -->
+
+ To illustrate this, suppose you want the cache library
+ to log messages of severity DEBUG, and the rest of the
+ resolver code to log messages of severity INFO. To achieve
+ this you specify two loggers, one with the name
+ <quote>Resolver</quote> and severity INFO, and one with
+ the name <quote>Resolver.cache</quote> with severity
+ DEBUG. As there are no entries for other libraries (e.g.
+ the nsas), they will use the configuration for the module
+ (<quote>Resolver</quote>), so giving the desired behavior.
+
+ </para>
+
+ <para>
+
+ One special case is that of a module name of <quote>*</quote>
+ (asterisk), which is interpreted as <emphasis>any</emphasis>
+ module. You can set global logging options by using this,
+ including setting the logging configuration for a library
+ that is used by multiple modules (e.g. <quote>*.config</quote>
+ specifies the configuration library code in whatever
+ module is using it).
+
+ </para>
+
+ <para>
+
+ If there are multiple logger specifications in the
+ configuration that might match a particular logger, the
+ specification with the more specific logger name takes
+ precedence. For example, if there are entries for
+ both <quote>*</quote> and <quote>Resolver</quote>, the
+ resolver module — and all libraries it uses —
+ will log messages according to the configuration in the
+ second entry (<quote>Resolver</quote>). All other modules
+ will use the configuration of the first entry
+ (<quote>*</quote>). If there was also a configuration
+ entry for <quote>Resolver.cache</quote>, the cache library
+ within the resolver would use that in preference to the
+ entry for <quote>Resolver</quote>.
+
+ </para>
+
+ <para>
+
+ One final note about the naming. When specifying the
+ module name within a logger, use the name of the module
+ as specified in <command>bindctl</command>, e.g.
+ <quote>Resolver</quote> for the resolver module,
+ <quote>Xfrout</quote> for the xfrout module, etc. When
+ the message is logged, the message will include the name
+ of the logger generating the message, but with the module
+ name replaced by the name of the process implementing
+ the module (so for example, a message generated by the
+ <quote>Auth.cache</quote> logger will appear in the output
+ with a logger name of <quote>b10-auth.cache</quote>).
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>severity (string)</title>
+
+ <para>
+
+ This specifies the category of messages logged.
+ Each message is logged with an associated severity which
+ may be one of the following (in descending order of
+ severity):
+ </para>
+
+ <itemizedlist>
+ <listitem>
+ <simpara> FATAL </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> ERROR </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> WARN </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> INFO </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> DEBUG </simpara>
+ </listitem>
+ </itemizedlist>
+
+ <para>
+
+ When the severity of a logger is set to one of these
+ values, it will only log messages of that severity, and
+ the severities above it. The severity may also be set to
+ NONE, in which case all messages from that logger are
+ inhibited.
+
+<!-- TODO: worded wrong? If I set to INFO, why would it show DEBUG which is literally below in that list? -->
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>output_options (list)</title>
+
+ <para>
+
+ Each logger can have zero or more
+ <option>output_options</option>. These specify where log
+ messages are sent to. These are explained in detail below.
+
+ </para>
+
+ <para>
+
+ The other options for a logger are:
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>debuglevel (integer)</title>
+
+ <para>
+
+ When a logger's severity is set to DEBUG, this value
+ specifies what debug messages should be printed. It ranges
+ from 0 (least verbose) to 99 (most verbose).
+ </para>
+
+
+<!-- TODO: complete this sentence:
+
+ The general classification of debug message types is
+
+TODO; there's a ticket to determine these levels, see #1074
+
+ -->
+
+ <para>
+
+ If severity for the logger is not DEBUG, this value is ignored.
+
+ </para>
+
+ </section>
+
+ <section>
+ <title>additive (true or false)</title>
+
+ <para>
+
+ If this is true, the <option>output_options</option> from
+ the parent will be used. For example, if there are two
+ loggers configured; <quote>Resolver</quote> and
+ <quote>Resolver.cache</quote>, and <option>additive</option>
+ is true in the second, it will write the log messages
+ not only to the destinations specified for
+ <quote>Resolver.cache</quote>, but also to the destinations
+ as specified in the <option>output_options</option> in
+ the logger named <quote>Resolver</quote>.
+
+<!-- TODO: check this -->
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Output Options</title>
+
+ <para>
+
+ The main settings for an output option are the
+ <option>destination</option> and a value called
+ <option>output</option>, the meaning of which depends on
+ the destination that is set.
+
+ </para>
+
+ <section>
+ <title>destination (string)</title>
+
+ <para>
+
+ The destination is the type of output. It can be one of:
+
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <simpara> console </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> file </simpara>
+ </listitem>
+
+ <listitem>
+ <simpara> syslog </simpara>
+ </listitem>
+
+ </itemizedlist>
+
+ </section>
+
+ <section>
+ <title>output (string)</title>
+
+ <para>
+
+ Depending on what is set as the output destination, this
+ value is interpreted as follows:
+
+ </para>
<variablelist>
- <varlistentry>
- <term>2011-06-15 13:48:22.034</term>
- <listitem><para>
- The date and time at which the message was generated.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ERROR</term>
- <listitem><para>
- The severity of the message.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>[b10-resolver.asiolink]</term>
- <listitem><para>
- The source of the message. This comprises two components:
- the BIND 10 process generating the message (in this
- case, <command>b10-resolver</command>) and the module
- within the program from which the message originated
- (which in the example is the asynchronous I/O link
- module, asiolink).
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>ASIODNS_OPENSOCK</term>
- <listitem><para>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>console</quote></term>
+ <listitem>
+ <simpara>
+ The value of output must be one of <quote>stdout</quote>
+ (messages printed to standard output) or
+ <quote>stderr</quote> (messages printed to standard
+ error).
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>file</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as a file name;
+ log messages will be appended to this file.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>destination</option> is <quote>syslog</quote></term>
+ <listitem>
+ <simpara>
+ The value of output is interpreted as the
+ <command>syslog</command> facility (e.g.
+ <emphasis>local0</emphasis>) that should be used
+ for log messages.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ <para>
+
+ The other options for <option>output_options</option> are:
+
+ </para>
+
+ <section>
+ <title>flush (true or false)</title>
+
+ <para>
+ Flush buffers after each log message. Doing this will
+ reduce performance but will ensure that if the program
+ terminates abnormally, all messages up to the point of
+ termination are output.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxsize (integer)</title>
+
+ <para>
+ Only relevant when destination is file, this is the maximum
+ file size of output files in bytes. When the maximum
+ size is reached, the file is renamed and a new file opened.
+ (For example, a ".1" is appended to the name —
+ if a ".1" file exists, it is renamed ".2",
+ etc.)
+ </para>
+
+ <para>
+ If this is 0, no maximum file size is used.
+ </para>
+
+ </section>
+
+ <section>
+ <title>maxver (integer)</title>
+
+ <para>
+ Maximum number of old log files to keep around when
+ rolling the output file. Only relevant when
+ <option>destination</option> is <quote>file</quote>.
+ </para>
+
+ </section>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Example session</title>
+
+ <para>
+
+ In this example we want to set the global logging to
+ write to the file <filename>/var/log/my_bind10.log</filename>,
+ at severity WARN. We want the authoritative server to
+ log at DEBUG with debuglevel 40, to a different file
+ (<filename>/tmp/debug_messages</filename>).
+
+ </para>
+
+ <para>
+
+ Start <command>bindctl</command>.
+
+ </para>
+
+ <para>
+
+ <screen>["login success "]
+> <userinput>config show Logging</userinput>
+Logging/loggers [] list
+</screen>
+
+ </para>
+
+ <para>
+
+ By default, no specific loggers are configured, in which
+ case the severity defaults to INFO and the output is
+ written to stderr.
+
+ </para>
+
+ <para>
+
+ Let's first add a default logger:
+
+ </para>
+
+<!-- TODO: adding the empty loggers makes no sense -->
+ <para>
+
+ <screen><userinput>> config add Logging/loggers</userinput>
+> <userinput>config show Logging</userinput>
+Logging/loggers/ list (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ The loggers value line changed to indicate that it is no
+ longer an empty list:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "" string (default)
+Logging/loggers[0]/severity "INFO" string (default)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ The name is mandatory, so we must set it. We will also
+ change the severity as well. Let's start with the global
+ logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput>config set Logging/loggers[0]/name *</userinput>
+> <userinput>config set Logging/loggers[0]/severity WARN</userinput>
+> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options [] list (default)
+</screen>
+
+ </para>
+
+ <para>
+
+ Of course, we need to specify where we want the log
+ messages to go, so we add an entry for an output option.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers[0]/output_options</userinput>
+> <userinput> config show Logging/loggers[0]/output_options</userinput>
+Logging/loggers[0]/output_options[0]/destination "console" string (default)
+Logging/loggers[0]/output_options[0]/output "stdout" string (default)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 0 integer (default)
+Logging/loggers[0]/output_options[0]/maxver 0 integer (default)
+</screen>
+
+
+ </para>
+
+ <para>
+
+ These aren't the values we are looking for.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config set Logging/loggers[0]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxsize 30000</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxver 8</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ Which would make the entire configuration for this logger
+ look like:
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config show all Logging/loggers</userinput>
+Logging/loggers[0]/name "*" string (modified)
+Logging/loggers[0]/severity "WARN" string (modified)
+Logging/loggers[0]/debuglevel 0 integer (default)
+Logging/loggers[0]/additive false boolean (default)
+Logging/loggers[0]/output_options[0]/destination "file" string (modified)
+Logging/loggers[0]/output_options[0]/output "/var/log/bind10.log" string (modified)
+Logging/loggers[0]/output_options[0]/flush false boolean (default)
+Logging/loggers[0]/output_options[0]/maxsize 30000 integer (modified)
+Logging/loggers[0]/output_options[0]/maxver 8 integer (modified)
+</screen>
+
+ </para>
+
+ <para>
+
+ That looks OK, so let's commit it before we add the
+ configuration for the authoritative server's logger.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config commit</userinput></screen>
+
+ </para>
+
+ <para>
+
+ Now that we have set it, and checked each value along
+ the way, adding a second entry is quite similar.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config add Logging/loggers</userinput>
+> <userinput> config set Logging/loggers[1]/name Auth</userinput>
+> <userinput> config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput> config set Logging/loggers[1]/debuglevel 40</userinput>
+> <userinput> config add Logging/loggers[1]/output_options</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And that's it. Once we have found whatever it was we
+ needed the debug messages for, we can simply remove the
+ second logger to let the authoritative server use the
+ same settings as the rest.
+
+ </para>
+
+ <para>
+
+ <screen>> <userinput> config remove Logging/loggers[1]</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+ </para>
+
+ <para>
+
+ And every module will now be using the values from the
+ logger named <quote>*</quote>.
+
+ </para>
+
+ </section>
+
+ </section>
+
+ <section>
+ <title>Logging Message Format</title>
+
+ <para>
+ Each message written by BIND 10 to the configured logging
+ destinations comprises a number of components that identify
+ the origin of the message and, if the message indicates
+ a problem, information about the problem that may be
+ useful in fixing it.
+ </para>
+
+ <para>
+ Consider the message below logged to a file:
+ <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+ ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+ </para>
+
+ <para>
+ Note: the layout of messages written to the system logging
+ file (syslog) may be slightly different. This message has
+ been split across two lines here for display reasons; in the
+ logging file, it will appear on one line.
+ </para>
+
+ <para>
+ The log message comprises a number of components:
+
+ <variablelist>
+ <varlistentry>
+ <term>2011-06-15 13:48:22.034</term>
+<!-- TODO: timestamp repeated even if using syslog? -->
+ <listitem><para>
+ The date and time at which the message was generated.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ERROR</term>
+ <listitem><para>
+ The severity of the message.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>[b10-resolver.asiolink]</term>
+ <listitem><para>
+ The source of the message. This comprises two components:
+ the BIND 10 process generating the message (in this
+ case, <command>b10-resolver</command>) and the module
+ within the program from which the message originated
+ (which in the example is the asynchronous I/O link
+ module, asiolink).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>ASIODNS_OPENSOCK</term>
+ <listitem><para>
The message identification. Every message in BIND 10
has a unique identification, which can be used as an
index into the <ulink
@@ -1515,25 +2235,29 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
Manual</citetitle></ulink> (<ulink
url="http://bind10.isc.org/docs/bind10-messages.html"
/>) from which more information can be obtained.
- </para></listitem>
- </varlistentry>
-
- <varlistentry>
- <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
- <listitem><para>
- A brief description of the cause of the problem. Within this text,
- information relating to the condition that caused the message to
- be logged will be included. In this example, error number 111
- (an operating system-specific error number) was encountered when
- trying to open a TCP connection to port 53 on the local system
- (address 127.0.0.1). The next step would be to find out the reason
- for the failure by consulting your system's documentation to
- identify what error number 111 means.
- </para></listitem>
- </varlistentry>
- </variablelist>
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+ <listitem><para>
+ A brief description of the cause of the problem.
+ Within this text, information relating to the condition
+ that caused the message to be logged will be included.
+ In this example, error number 111 (an operating
+ system-specific error number) was encountered when
+ trying to open a TCP connection to port 53 on the
+ local system (address 127.0.0.1). The next step
+ would be to find out the reason for the failure by
+ consulting your system's documentation to identify
+ what error number 111 means.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ </section>
- </para>
</chapter>
<!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index b075e96..237b7ad 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
- 20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+ 20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
Internet Systems Consortium (ISC). It includes DNS libraries
and modular components for controlling authoritative and
recursive DNS servers.
</p><p>
- This is the messages manual for BIND 10 version 20110519.
+ This is the messages manual for BIND 10 version 20110809.
The most up-to-date version of this document, along with
other documents for BIND 10, can be found at
<a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -26,38 +26,635 @@
For information on configuring and using BIND 10 logging,
refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
</p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
- </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
-A debug message, this records the the upstream fetch (a query made by the
+ </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
-</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
-</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
or a problem on the network. The message will only appear if debug is
enabled.
-</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="BIND10_CHECK_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</span></dt><dd><p>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</p></dd><dt><a name="BIND10_KILLING_ALL_PROCESSES"></a><span class="term">BIND10_KILLING_ALL_PROCESSES killing all started processes</span></dt><dd><p>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</p></dd><dt><a name="BIND10_KILL_PROCESS"></a><span class="term">BIND10_KILL_PROCESS killing process %1</span></dt><dd><p>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+</p></dd><dt><a name="BIND10_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</span></dt><dd><p>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</p></dd><dt><a name="BIND10_RECEIVED_COMMAND"></a><span class="term">BIND10_RECEIVED_COMMAND received command: %1</span></dt><dd><p>
+The boss module received a command and shall now process it. The command
+is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_NEW_CONFIGURATION"></a><span class="term">BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</span></dt><dd><p>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_SIGNAL"></a><span class="term">BIND10_RECEIVED_SIGNAL received signal %1</span></dt><dd><p>
+The boss module received the given signal.
+</p></dd><dt><a name="BIND10_RESURRECTED_PROCESS"></a><span class="term">BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</span></dt><dd><p>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</p></dd><dt><a name="BIND10_RESURRECTING_PROCESS"></a><span class="term">BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</span></dt><dd><p>
+The given process has ended unexpectedly, and is now restarted.
+</p></dd><dt><a name="BIND10_SELECT_ERROR"></a><span class="term">BIND10_SELECT_ERROR error in select() call: %1</span></dt><dd><p>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</p></dd><dt><a name="BIND10_SEND_SIGKILL"></a><span class="term">BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGKILL signal to the given process.
+</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</p></dd><dt><a name="BIND10_SHUTDOWN_COMPLETE"></a><span class="term">BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</span></dt><dd><p>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_CAUSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</span></dt><dd><p>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_INIT"></a><span class="term">BIND10_SOCKCREATOR_INIT initializing socket creator parser</span></dt><dd><p>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_KILL"></a><span class="term">BIND10_SOCKCREATOR_KILL killing the socket creator</span></dt><dd><p>
+The socket creator is being terminated the aggressive way, by sending it
+sigkill. This should not happen usually.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TERMINATE"></a><span class="term">BIND10_SOCKCREATOR_TERMINATE terminating socket creator</span></dt><dd><p>
+The boss module sends a request to terminate to the socket creator.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TRANSPORT_ERROR"></a><span class="term">BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</span></dt><dd><p>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</p></dd><dt><a name="BIND10_SOCKET_CREATED"></a><span class="term">BIND10_SOCKET_CREATED successfully created socket %1</span></dt><dd><p>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</p></dd><dt><a name="BIND10_SOCKET_ERROR"></a><span class="term">BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</span></dt><dd><p>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
+The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
+The given process has successfully been started.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
+The given process has successfully been started, and has the given PID.
+</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
+Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
+The boss module is starting the given process.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given port number.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT_ADDRESS"></a><span class="term">BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</p></dd><dt><a name="BIND10_STARTUP_COMPLETE"></a><span class="term">BIND10_STARTUP_COMPLETE BIND 10 started</span></dt><dd><p>
+All modules have been successfully started, and BIND 10 is now running.
+</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_STOP_PROCESS"></a><span class="term">BIND10_STOP_PROCESS asking %1 to shut down</span></dt><dd><p>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</p></dd><dt><a name="CACHE_LOCALZONE_FOUND"></a><span class="term">CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</span></dt><dd><p>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</p></dd><dt><a name="CACHE_LOCALZONE_UNKNOWN"></a><span class="term">CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</span></dt><dd><p>
+Debug message. The requested data was not found in the local zone data.
+</p></dd><dt><a name="CACHE_LOCALZONE_UPDATE"></a><span class="term">CACHE_LOCALZONE_UPDATE updating local zone element at key %1</span></dt><dd><p>
+Debug message issued when there's update to the local zone section of cache.
+</p></dd><dt><a name="CACHE_MESSAGES_DEINIT"></a><span class="term">CACHE_MESSAGES_DEINIT deinitialized message cache</span></dt><dd><p>
+Debug message. It is issued when the server deinitializes the message cache.
+</p></dd><dt><a name="CACHE_MESSAGES_EXPIRED"></a><span class="term">CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</p></dd><dt><a name="CACHE_MESSAGES_FOUND"></a><span class="term">CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</p></dd><dt><a name="CACHE_MESSAGES_INIT"></a><span class="term">CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</span></dt><dd><p>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_REMOVE"></a><span class="term">CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</span></dt><dd><p>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</p></dd><dt><a name="CACHE_MESSAGES_UNCACHEABLE"></a><span class="term">CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</span></dt><dd><p>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</p></dd><dt><a name="CACHE_MESSAGES_UNKNOWN"></a><span class="term">CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</span></dt><dd><p>
+Debug message. The message cache didn't find any entry for the given key.
+</p></dd><dt><a name="CACHE_MESSAGES_UPDATE"></a><span class="term">CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</span></dt><dd><p>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new one
+is created.
+</p></dd><dt><a name="CACHE_RESOLVER_DEEPEST"></a><span class="term">CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT"></a><span class="term">CACHE_RESOLVER_INIT initializing resolver cache for class %1</span></dt><dd><p>
+Debug message. The resolver cache is being created for this given class.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT_INFO"></a><span class="term">CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</span></dt><dd><p>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_MSG"></a><span class="term">CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_RRSET"></a><span class="term">CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_MSG"></a><span class="term">CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_RRSET"></a><span class="term">CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</p></dd><dt><a name="CACHE_RESOLVER_NO_QUESTION"></a><span class="term">CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</span></dt><dd><p>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating a message in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating an RRset in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RRSET_EXPIRED"></a><span class="term">CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</span></dt><dd><p>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</p></dd><dt><a name="CACHE_RRSET_INIT"></a><span class="term">CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</span></dt><dd><p>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</p></dd><dt><a name="CACHE_RRSET_UNTRUSTED"></a><span class="term">CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</p></dd><dt><a name="CACHE_RRSET_UPDATE"></a><span class="term">CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</span></dt><dd><p>
+Debug message. The RRset is updating its data with this given RRset.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely that the program will have any reason to continue running, as
+the communication with the rest of BIND 10 is vital
+for the components.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as message lengths. The first one is the total length
+of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing the length of message on the socket, but it
+is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for reason listed.
+It is unlikely any program will be able to continue without the communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length of zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE"></a><span class="term">CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</span></dt><dd><p>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_BAD_CONFIG_DATA"></a><span class="term">CMDCTL_BAD_CONFIG_DATA error in config data: %1</span></dt><dd><p>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</p></dd><dt><a name="CMDCTL_BAD_PASSWORD"></a><span class="term">CMDCTL_BAD_PASSWORD bad password for user: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_ERROR"></a><span class="term">CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_TIMEOUT"></a><span class="term">CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</span></dt><dd><p>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_COMMAND_ERROR"></a><span class="term">CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</span></dt><dd><p>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</p></dd><dt><a name="CMDCTL_COMMAND_SENT"></a><span class="term">CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</span></dt><dd><p>
+This debug message indicates that the given command has been sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_NO_SUCH_USER"></a><span class="term">CMDCTL_NO_SUCH_USER username not found in user database: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_NO_USER_ENTRIES_READ"></a><span class="term">CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</span></dt><dd><p>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_SEND_COMMAND"></a><span class="term">CMDCTL_SEND_COMMAND sending command %1 to module %2</span></dt><dd><p>
+This debug message indicates that the given command is being sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED"></a><span class="term">CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</span></dt><dd><p>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_UNCAUGHT_EXCEPTION"></a><span class="term">CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</p></dd><dt><a name="CMDCTL_USER_DATABASE_READ_ERROR"></a><span class="term">CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</span></dt><dd><p>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
There was a problem with an incoming message on the command and control
channel. The message does not appear to be a valid command, and is
@@ -65,77 +662,152 @@ missing a required element or contains an unknown data format. This
most likely means that another BIND10 module is sending a bad message.
The message itself is ignored by this module.
</p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
-</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
-There was an error opening the given file.
-</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
The configuration manager returned an error when this module requested
the configuration. The full error message answer from the configuration
manager is appended to the log error. The most likely cause is that
the module is of a different (command specification) version than the
running configuration manager.
-</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+</p></dd><dt><a name="CONFIG_GET_FAILED"></a><span class="term">CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_LOG_CONFIG_ERRORS"></a><span class="term">CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</span></dt><dd><p>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</p></dd><dt><a name="CONFIG_LOG_EXPLICIT"></a><span class="term">CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_EXPLICIT"></a><span class="term">CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_WILD"></a><span class="term">CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</p></dd><dt><a name="CONFIG_LOG_WILD_MATCH"></a><span class="term">CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
Debug information. The hotspot cache is being destroyed.
-</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
-The hotspot cache is enabled from now on.
-</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is disabled.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is enabled.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</span></dt><dd><p>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
-Debug information. An item was successfully looked up in the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. An item was successfully located in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</span></dt><dd><p>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
be dropped. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
-Debug information. It means a new item is being inserted into the hotspot
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</span></dt><dd><p>
+A debug message indicating that a new item is being inserted into the hotspot
cache.
-</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</span></dt><dd><p>
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</span></dt><dd><p>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</span></dt><dd><p>
Debug information. An item is being removed from the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</span></dt><dd><p>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. A size of 0
means no limit.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
+This was an internal error while reading data from a datasource. This can either
+mean the specific data source implementation is not behaving correctly, or the
+data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION_EXACT"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</span></dt><dd><p>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DNAME"></a><span class="term">DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXRRSET"></a><span class="term">DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_RRSET"></a><span class="term">DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
</p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
Debug information. An RRset is being added to the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
Debug information. A zone is being added into the in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
@@ -146,7 +818,7 @@ Debug information. The requested domain is an alias to a different domain,
returning the CNAME instead.
</p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
Someone or something tried to add a CNAME into a domain that already contains
some other data. But the protocol forbids coexistence of CNAME with anything
@@ -164,10 +836,10 @@ encountered on the way. This may lead to redirection to a different domain and
stop the search.
</p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
Debug information. A DNAME was found instead of the requested information.
-</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
Debug information. The requested domain exists in the tree of domains, but
it is empty. Therefore it doesn't contain the requested resource type.
@@ -186,7 +858,7 @@ Debug information. A zone object for this zone is being searched for in the
in-memory data source.
</p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
Debug information. The content of master file is being loaded into the memory.
-</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_NOT_FOUND"></a><span class="term">DATASRC_MEM_NOT_FOUND requested domain '%1' not found</span></dt><dd><p>
Debug information. The requested domain does not exist.
</p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
Debug information. While searching for the requested domain, a NS was
@@ -222,21 +894,21 @@ destroyed.
Debug information. A domain above wildcard was reached, but there's something
below the requested domain. Therefore the wildcard doesn't apply here. This
behaviour is specified by RFC 1034, section 4.3.3.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
</p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
classes do not match.
</p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
Debug information. A data source is being removed from meta data source.
@@ -257,10 +929,10 @@ specific error already.
</p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
The domain lives in another zone. But it is not possible to generate referral
information for it.
-</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</span></dt><dd><p>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
-</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</span></dt><dd><p>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
</p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
@@ -269,20 +941,19 @@ response message.
</p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
Debug information. The software is trying to identify delegation points on the
way down to the given domain.
-</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
During an attempt to synthesize CNAME from this DNAME it was discovered the
DNAME is empty (it has no records). This indicates a problem with supplied data.
</p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
Debug information. While processing a query, a MX record was met. It
references the mentioned address, so A/AAAA records for it are looked up
@@ -301,12 +972,12 @@ operation code.
</p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is an auth query.
</p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
Debug information. The last DO_QUERY is a simple query.
</p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
@@ -324,10 +995,10 @@ does not have one. This indicates problem with provided data.
The underlying data source failed to answer the no-glue query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
</p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
@@ -341,7 +1012,7 @@ Lookup of domain failed because the data have no zone that contains the
domain. Maybe someone sent a query to the wrong server for some reason.
</p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
Debug information. A sure query is being processed now.
-</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
to prove the nonexistence.
@@ -357,13 +1028,13 @@ The underlying data source failed to answer the simple query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
</p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
</p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
The query subtask failed. The reason should have been reported by the subtask
already. The code is 1 for error, 2 for not implemented.
-</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
might possibly be a loop as well. Note that some of the CNAMEs might have
@@ -377,7 +1048,7 @@ domain is being looked for now.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
During an attempt to cover the domain by a wildcard an error happened. The
exact kind was hopefully already reported.
-</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
@@ -385,15 +1056,21 @@ While processing a wildcard, a referral was met. But it wasn't possible to get
enough information for it. The code is 1 for error, 2 for not implemented.
</p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
Debug information. The SQLite data source is closing the database file.
-</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_CONNCLOSE"></a><span class="term">DATASRC_SQLITE_CONNCLOSE Closing sqlite database</span></dt><dd><p>
+The database file is no longer needed and is being closed.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNOPEN"></a><span class="term">DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</span></dt><dd><p>
+The database file is being opened so it can start providing data.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
Debug information. An instance of SQLite data source is being created.
-</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_DROPCONN"></a><span class="term">DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</span></dt><dd><p>
+The object around a database connection is being destroyed.
</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
-</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
Debug information. The SQLite data source is looking up a resource record
@@ -417,7 +1094,7 @@ and type in the database.
Debug information. The SQLite data source is identifying if this domain is
a referral and where it goes.
</p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains different class than the query was for.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
The SQLite data source was looking up an RRset, but the data source contains
@@ -428,21 +1105,30 @@ source.
</p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_NEWCONN"></a><span class="term">DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</span></dt><dd><p>
+A wrapper object to hold database connection is being initialized.
</p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
The database for SQLite data source was found empty. It is assumed this is the
first run and it is being initialized with current schema. It'll still contain
no data, but it will be ready for use.
-</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CLASS_NOT_CH"></a><span class="term">DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</span></dt><dd><p>
+An error message indicating that a query requesting a RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
</p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
Debug information. The static data source (the one holding stuff like
version.bind) is being created.
@@ -452,142 +1138,229 @@ data source.
</p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
This indicates a programming error. An internal task of unknown type was
generated.
-</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
-</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
-</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
-</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
-</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message may appear:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</p><p>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
</p><p>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
-The program was not able to open the specified input message file for the
-reason given.
-</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
-The program was not able to open the specified output file for the reason
-given.
-</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
The specified error was encountered reading from the named message file.
-</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
-The specified error was encountered by the message compiler when writing to
-the named output file.
-</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_OPCODE"></a><span class="term">NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QID"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QUERY_NAME"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_QR_NOT_SET"></a><span class="term">NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION"></a><span class="term">NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</p></dd><dt><a name="NOTIFY_OUT_RETRY_EXCEEDED"></a><span class="term">NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</span></dt><dd><p>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</p></dd><dt><a name="NOTIFY_OUT_SENDING_NOTIFY"></a><span class="term">NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</span></dt><dd><p>
+A notify message is sent to the secondary nameserver at the given
+address.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_RECV_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_TIMEOUT"></a><span class="term">NOTIFY_OUT_TIMEOUT retry notify to %1#%2</span></dt><dd><p>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different given type and class.
+</p><p>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
A debug message recording that an answer has been received to an upstream
query for the specified question. Previous debug messages will have indicated
@@ -599,95 +1372,95 @@ the server to which the question was sent.
</p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
A debug message, a cache lookup did not find the specified <name, class,
type> tuple in the cache; instead, the deepest delegation found is indicated.
-</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
-</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
the server to which the question was sent). However, receipt of this CNAME
has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
-</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
-</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
-</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
messages will have indicated the server to which the question was sent.
</p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1: %3</span></dt><dd><p>
A debug message indicating that a protocol error was received. As there
are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
-</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
-A debug message indicating that the last referral message was to the specified
-zone.
-</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that a RecursiveQuery object found
the specified <name, class, type> tuple in the cache. The instance number
at the end of the message indicates which of the two resolve() methods has
been called.
-</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
object has been created to resolve the question. The instance number at
the end of the message indicates which of the two resolve() methods has
been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
</p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, the RecursiveQuery::resolve method has been called to resolve
the specified <name, class, type> tuple. The first action will be to lookup
the specified tuple in the cache. The instance number at the end of the
message indicates which of the two resolve() methods has been called.
-</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
number at the end of the message indicates which of the two resolve()
methods has been called.
</p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
A debug message giving the round-trip time of the last query and response.
-</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
-</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up <%1> in the cache</span></dt><dd><p>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
tuple, the first action of which will be to examine the cache.
-</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
-</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
-</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
-</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
whose address is given in the message.
</p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
@@ -699,143 +1472,610 @@ gives no cause for concern.
</p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
A debug message indicating that a query for the specified <name, class, type>
tuple is being sent to a nameserver whose address is given in the message.
-</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
-</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
-A debug message, output when the resolver configuration has been successfully
-loaded.
-</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
-A debug message, the configuration has been updated with the specified
-information.
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
-A debug message, output when the Resolver() object has been created.
-</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message indicating that the main resolver object has
+been created.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
-</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
-The received query has passed all checks and is being forwarded to upstream
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
-</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
-The received query has passed all checks and is being processed by the resolver.
-</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
-</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
-This message is logged when a "print_message" command is received over the
-command channel.
-</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
-A debug message noting that the resolver is creating a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
-</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
-A debug message, output when the main service object (which handles the
-received queries) is created.
-</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
-A debug message, lists the parameters associated with the message. These are:
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
+This debug message is output when resolver creates the main service object
+(which handles the received queries).
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</p><p>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
</p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
This informational message is output by the resolver when all initialization
has been completed and it is entering its main loop.
</p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
An informational message, this is output when the resolver starts up.
-</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="SRVCOMM_ADDRESSES_NOT_LIST"></a><span class="term">SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</span></dt><dd><p>
+This points to an error in configuration. What was supposed to be a list of
+IP address/port pairs is not a list at all but something else.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_FAIL"></a><span class="term">SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</span></dt><dd><p>
+The server failed to bind to one of the address/port pairs it should
+listen on according to the configuration, for the reason listed in the
+message (usually because that pair is already used by another service or
+because of missing privileges). The server will try to recover and bind
+the address/port pairs it was listening on before (if any).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_MISSING"></a><span class="term">SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_TYPE"></a><span class="term">SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_UNRECOVERABLE"></a><span class="term">SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</span></dt><dd><p>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</p><p>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it to do so.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_VALUE"></a><span class="term">SRVCOMM_ADDRESS_VALUE address to set: %1#%2</span></dt><dd><p>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (eg. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</p></dd><dt><a name="SRVCOMM_KEYS_DEINIT"></a><span class="term">SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</p></dd><dt><a name="SRVCOMM_KEYS_INIT"></a><span class="term">SRVCOMM_KEYS_INIT initializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</p></dd><dt><a name="SRVCOMM_KEYS_UPDATE"></a><span class="term">SRVCOMM_KEYS_UPDATE updating TSIG keyring</span></dt><dd><p>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</p></dd><dt><a name="SRVCOMM_PORT_RANGE"></a><span class="term">SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</span></dt><dd><p>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</p></dd><dt><a name="SRVCOMM_SET_LISTEN"></a><span class="term">SRVCOMM_SET_LISTEN setting addresses to listen to</span></dt><dd><p>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</p></dd><dt><a name="STATHTTPD_BAD_OPTION_VALUE"></a><span class="term">STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</p></dd><dt><a name="STATHTTPD_CC_SESSION_ERROR"></a><span class="term">STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</p></dd><dt><a name="STATHTTPD_CLOSING"></a><span class="term">STATHTTPD_CLOSING closing %1#%2</span></dt><dd><p>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</p></dd><dt><a name="STATHTTPD_CLOSING_CC_SESSION"></a><span class="term">STATHTTPD_CLOSING_CC_SESSION stopping cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</p></dd><dt><a name="STATHTTPD_HANDLE_CONFIG"></a><span class="term">STATHTTPD_HANDLE_CONFIG reading configuration: %1</span></dt><dd><p>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_STATUS_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</p></dd><dt><a name="STATHTTPD_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</p></dd><dt><a name="STATHTTPD_SHUTDOWN"></a><span class="term">STATHTTPD_SHUTDOWN shutting down</span></dt><dd><p>
+The stats-httpd daemon is shutting down.
+</p></dd><dt><a name="STATHTTPD_STARTED"></a><span class="term">STATHTTPD_STARTED listening on %1#%2</span></dt><dd><p>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</p></dd><dt><a name="STATHTTPD_STARTING_CC_SESSION"></a><span class="term">STATHTTPD_STARTING_CC_SESSION starting cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</p></dd><dt><a name="STATHTTPD_START_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</p></dd><dt><a name="STATHTTPD_STOPPED_BY_KEYBOARD"></a><span class="term">STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</p></dd><dt><a name="STATHTTPD_UNKNOWN_CONFIG_ITEM"></a><span class="term">STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</span></dt><dd><p>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</p></dd><dt><a name="STATS_BAD_OPTION_VALUE"></a><span class="term">STATS_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats module was called with a bad command-line argument and will
+not start.
+</p></dd><dt><a name="STATS_CC_SESSION_ERROR"></a><span class="term">STATS_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
+The stats module received a command to show all statistics that it has
+collected.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</span></dt><dd><p>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</p></dd><dt><a name="STATS_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats module and it will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_STATUS_COMMAND"></a><span class="term">STATS_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</p></dd><dt><a name="STATS_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</p></dd><dt><a name="STATS_UNKNOWN_COMMAND_IN_SPEC"></a><span class="term">STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</span></dt><dd><p>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
+The AXFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon used by xfrin is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting resolver with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the xfrin daemon starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shutdown.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer the zone to the given host.
+This is required by the ACLs. The %1 and %2 represent the zone name and class,
+the %3 and %4 the IP address and port of the peer requesting the transfer.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
+given host. This is because of ACLs. The %1 and %2 represent the zone name and
+class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</p></dd><dt><a name="ZONEMGR_JITTER_TOO_BIG"></a><span class="term">ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</span></dt><dd><p>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</p></dd><dt><a name="ZONEMGR_KEYBOARD_INTERRUPT"></a><span class="term">ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</span></dt><dd><p>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</p></dd><dt><a name="ZONEMGR_LOAD_ZONE"></a><span class="term">ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</p></dd><dt><a name="ZONEMGR_NO_MASTER_ADDRESS"></a><span class="term">ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</span></dt><dd><p>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_SOA"></a><span class="term">ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</span></dt><dd><p>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</p></dd><dt><a name="ZONEMGR_NO_TIMER_THREAD"></a><span class="term">ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</span></dt><dd><p>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_CLASS"></a><span class="term">ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_NAME"></a><span class="term">ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_NOTIFY"></a><span class="term">ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_SHUTDOWN"></a><span class="term">ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_UNKNOWN"></a><span class="term">ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</span></dt><dd><p>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_FAILED"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_SUCCESS"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</p></dd><dt><a name="ZONEMGR_REFRESH_ZONE"></a><span class="term">ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</span></dt><dd><p>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</p></dd><dt><a name="ZONEMGR_SELECT_ERROR"></a><span class="term">ZONEMGR_SELECT_ERROR error with select(): %1</span></dt><dd><p>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</p></dd><dt><a name="ZONEMGR_SEND_FAIL"></a><span class="term">ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</span></dt><dd><p>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</p></dd><dt><a name="ZONEMGR_SESSION_ERROR"></a><span class="term">ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SESSION_TIMEOUT"></a><span class="term">ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SHUTDOWN"></a><span class="term">ZONEMGR_SHUTDOWN zone manager has shut down</span></dt><dd><p>
+A debug message, output when the zone manager has shut down completely.
+</p></dd><dt><a name="ZONEMGR_STARTING"></a><span class="term">ZONEMGR_STARTING zone manager starting</span></dt><dd><p>
+A debug message output when the zone manager starts up.
+</p></dd><dt><a name="ZONEMGR_TIMER_THREAD_RUNNING"></a><span class="term">ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</span></dt><dd><p>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_FAIL"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_NOTIFIED"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_SUCCESS"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</p></dd></dl></div><p>
</p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index eaa8bb9..f5c44b3 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -5,6 +5,12 @@
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
@@ -62,16 +68,16 @@
<para>
<variablelist>
-<varlistentry id="ASIODNS_FETCHCOMP">
-<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
<listitem><para>
-A debug message, this records the the upstream fetch (a query made by the
+A debug message, this records that the upstream fetch (a query made by the
resolver on behalf of its client) to the specified address has completed.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_FETCHSTOP">
-<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
<listitem><para>
An external component has requested the halting of an upstream fetch. This
is an allowed operation, and the message should only appear if debug is
@@ -79,27 +85,27 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_OPENSOCK">
-<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
<listitem><para>
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVSOCK">
-<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_RECVTMO">
-<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
<listitem><para>
An upstream fetch from the specified address timed out. This may happen for
any number of reasons and is most probably a problem at the remote server
@@ -108,29 +114,1436 @@ enabled.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_SENDSOCK">
-<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup and is an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup and is an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message that is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process. It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel. It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process. It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process. It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CHECK_MSGQ_ALREADY_RUNNING">
+<term>BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</term>
+<listitem><para>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
+<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<listitem><para>
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
+<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<listitem><para>
+This message shows whether or not the resolver should be
+started according to the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_USER">
+<term>BIND10_INVALID_USER invalid user: %1</term>
+<listitem><para>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILLING_ALL_PROCESSES">
+<term>BIND10_KILLING_ALL_PROCESSES killing all started processes</term>
+<listitem><para>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILL_PROCESS">
+<term>BIND10_KILL_PROCESS killing process %1</term>
+<listitem><para>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_ALREADY_RUNNING">
+<term>BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</term>
+<listitem><para>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
+<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
+<listitem><para>
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DISAPPEARED">
+<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
+<listitem><para>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
+<listitem><para>
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
+<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<listitem><para>
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_READING_BOSS_CONFIGURATION">
+<term>BIND10_READING_BOSS_CONFIGURATION reading boss configuration</term>
+<listitem><para>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_COMMAND">
+<term>BIND10_RECEIVED_COMMAND received command: %1</term>
+<listitem><para>
+The boss module received a command and shall now process it. The command
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_NEW_CONFIGURATION">
+<term>BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</term>
+<listitem><para>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_SIGNAL">
+<term>BIND10_RECEIVED_SIGNAL received signal %1</term>
+<listitem><para>
+The boss module received the given signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTED_PROCESS">
+<term>BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</term>
+<listitem><para>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTING_PROCESS">
+<term>BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</term>
+<listitem><para>
+The given process has ended unexpectedly, and is now restarted.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SELECT_ERROR">
+<term>BIND10_SELECT_ERROR error in select() call: %1</term>
+<listitem><para>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGKILL">
+<term>BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGKILL signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGTERM">
+<term>BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGTERM signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN">
+<term>BIND10_SHUTDOWN stopping the server</term>
+<listitem><para>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN_COMPLETE">
+<term>BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</term>
+<listitem><para>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_CAUSE">
+<term>BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</term>
+<listitem><para>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_RESPONSE">
+<term>BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</term>
+<listitem><para>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
+<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
+<listitem><para>
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_EOF">
+<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
+<listitem><para>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_INIT">
+<term>BIND10_SOCKCREATOR_INIT initializing socket creator parser</term>
+<listitem><para>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_KILL">
+<term>BIND10_SOCKCREATOR_KILL killing the socket creator</term>
+<listitem><para>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not normally happen.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TERMINATE">
+<term>BIND10_SOCKCREATOR_TERMINATE terminating socket creator</term>
+<listitem><para>
+The boss module sends a request to terminate to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TRANSPORT_ERROR">
+<term>BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</term>
+<listitem><para>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_CREATED">
+<term>BIND10_SOCKET_CREATED successfully created socket %1</term>
+<listitem><para>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_ERROR">
+<term>BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</term>
+<listitem><para>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_GET">
+<term>BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</term>
+<listitem><para>
+The boss forwards a request for a socket to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS">
+<term>BIND10_STARTED_PROCESS started %1</term>
+<listitem><para>
+The given process has successfully been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS_PID">
+<term>BIND10_STARTED_PROCESS_PID started %1 (PID %2)</term>
+<listitem><para>
+The given process has successfully been started, and has the given PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING">
+<term>BIND10_STARTING starting BIND10: %1</term>
+<listitem><para>
+Informational message on startup that shows the full version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS">
+<term>BIND10_STARTING_PROCESS starting process %1</term>
+<listitem><para>
+The boss module is starting the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT">
+<term>BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT_ADDRESS">
+<term>BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_COMPLETE">
+<term>BIND10_STARTUP_COMPLETE BIND 10 started</term>
+<listitem><para>
+All modules have been successfully started, and BIND 10 is now running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_ERROR">
+<term>BIND10_STARTUP_ERROR error during startup: %1</term>
+<listitem><para>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT">
+<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<listitem><para>
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STOP_PROCESS">
+<term>BIND10_STOP_PROCESS asking %1 to shut down</term>
+<listitem><para>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_UNKNOWN_CHILD_PROCESS_ENDED">
+<term>BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</term>
+<listitem><para>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
+<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
+<listitem><para>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_FOUND">
+<term>CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</term>
+<listitem><para>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UNKNOWN">
+<term>CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</term>
+<listitem><para>
+Debug message. The requested data was not found in the local zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UPDATE">
+<term>CACHE_LOCALZONE_UPDATE updating local zone element at key %1</term>
+<listitem><para>
+Debug message issued when there's update to the local zone section of cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_DEINIT">
+<term>CACHE_MESSAGES_DEINIT deinitialized message cache</term>
+<listitem><para>
+Debug message. It is issued when the server deinitializes the message cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_EXPIRED">
+<term>CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_FOUND">
+<term>CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_INIT">
+<term>CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</term>
+<listitem><para>
+Debug message issued when a new message cache is issued. It lists the class
+of messages it can hold and the maximum size of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_REMOVE">
+<term>CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</term>
+<listitem><para>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNCACHEABLE">
+<term>CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</term>
+<listitem><para>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNKNOWN">
+<term>CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</term>
<listitem><para>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+Debug message. The message cache didn't find any entry for the given key.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKORIGIN">
-<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<varlistentry id="CACHE_MESSAGES_UPDATE">
+<term>CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</term>
<listitem><para>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new
+one is created.
</para></listitem>
</varlistentry>
-<varlistentry id="ASIODNS_UNKRESULT">
-<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<varlistentry id="CACHE_RESOLVER_DEEPEST">
+<term>CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</term>
<listitem><para>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message). This message should
-not appear and may indicate an internal error. Please enter a bug report.
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT">
+<term>CACHE_RESOLVER_INIT initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for this given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT_INFO">
+<term>CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_MSG">
+<term>CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_RRSET">
+<term>CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_MSG">
+<term>CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_RRSET">
+<term>CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_NO_QUESTION">
+<term>CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</term>
+<listitem><para>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_MSG">
+<term>CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating a message in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_RRSET">
+<term>CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating an RRset in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_EXPIRED">
+<term>CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</term>
+<listitem><para>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_INIT">
+<term>CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</term>
+<listitem><para>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_LOOKUP">
+<term>CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</term>
+<listitem><para>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_NOT_FOUND">
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_REMOVE_OLD">
+<term>CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UNTRUSTED">
+<term>CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UPDATE">
+<term>CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</term>
+<listitem><para>
+Debug message. The RRset cache is updating its data with the given RRset.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as the lengths of a message. The first one is total length
+of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of message on the socket, but it
+is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for reason listed.
+It is unlikely any program will be able to continue without the communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it did not answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length being zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE">
+<term>CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</term>
+<listitem><para>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_CONFIG_DATA">
+<term>CMDCTL_BAD_CONFIG_DATA error in config data: %1</term>
+<listitem><para>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_PASSWORD">
+<term>CMDCTL_BAD_PASSWORD bad password for user: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_ERROR">
+<term>CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_TIMEOUT">
+<term>CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</term>
+<listitem><para>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_ERROR">
+<term>CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</term>
+<listitem><para>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_SENT">
+<term>CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</term>
+<listitem><para>
+This debug message indicates that the given command has been sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_SUCH_USER">
+<term>CMDCTL_NO_SUCH_USER username not found in user database: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_USER_ENTRIES_READ">
+<term>CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SEND_COMMAND">
+<term>CMDCTL_SEND_COMMAND sending command %1 to module %2</term>
+<listitem><para>
+This debug message indicates that the given command is being sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED">
+<term>CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</term>
+<listitem><para>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the SSL request itself was bad, or the local key or
+certificate file could not be read.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
+<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_UNCAUGHT_EXCEPTION">
+<term>CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_USER_DATABASE_READ_ERROR">
+<term>CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
</para></listitem>
</varlistentry>
@@ -148,65 +1561,128 @@ The message itself is ignored by this module.
<varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
<term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
<listitem><para>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error. Please raise
+a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_FOPEN_ERR">
-<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
<listitem><para>
-There was an error opening the given file.
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAILED">
+<term>CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
</para></listitem>
</varlistentry>
<varlistentry id="CONFIG_JSON_PARSE">
<term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
<listitem><para>
-There was a parse error in the JSON file. The given file does not appear
+There was an error parsing the JSON file. The given file does not appear
to be in valid JSON format. Please verify that the filename is correct
and that the contents are valid JSON.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_CONFIG">
-<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_CONFIG_ERRORS">
+<term>CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</term>
<listitem><para>
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
-<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_EXPLICIT">
+<term>CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
</para></listitem>
</varlistentry>
-<varlistentry id="CONFIG_MODULE_SPEC">
-<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<varlistentry id="CONFIG_LOG_IGNORE_EXPLICIT">
+<term>CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</term>
<listitem><para>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_WILD">
+<term>CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_WILD_MATCH">
+<term>CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
+<listitem><para>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
+<listitem><para>
+There was an error opening the given file. The reason for the failure
+is included in the message.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_CREATE">
<term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
<listitem><para>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
</para></listitem>
</varlistentry>
@@ -218,39 +1694,37 @@ Debug information. The hotspot cache is being destroyed.
</varlistentry>
<varlistentry id="DATASRC_CACHE_DISABLE">
-<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<term>DATASRC_CACHE_DISABLE disabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
+A debug message issued when the hotspot cache is disabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_ENABLE">
-<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<term>DATASRC_CACHE_ENABLE enabling the hotspot cache</term>
<listitem><para>
-The hotspot cache is enabled from now on.
+A debug message issued when the hotspot cache is enabled.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_EXPIRED">
-<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<term>DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</term>
<listitem><para>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FOUND">
<term>DATASRC_CACHE_FOUND the item '%1' was found</term>
<listitem><para>
-Debug information. An item was successfully looked up in the hotspot cache.
+Debug information. An item was successfully located in the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_FULL">
-<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<term>DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</term>
<listitem><para>
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
@@ -259,39 +1733,39 @@ be dropped. This should be directly followed by CACHE_REMOVE.
</varlistentry>
<varlistentry id="DATASRC_CACHE_INSERT">
-<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</term>
<listitem><para>
-Debug information. It means a new item is being inserted into the hotspot
+A debug message indicating that a new item is being inserted into the hotspot
cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_NOT_FOUND">
-<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</term>
<listitem><para>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_OLD_FOUND">
-<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<term>DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</term>
<listitem><para>
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_REMOVE">
-<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<term>DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</term>
<listitem><para>
Debug information. An item is being removed from the hotspot cache.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_CACHE_SLOTS">
-<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<term>DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</term>
<listitem><para>
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
@@ -299,11 +1773,109 @@ means no limit.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_DATABASE_FIND_ERROR">
+<term>DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an internal error while reading data from a datasource. This can
+either mean that the specific data source implementation is not behaving
+correctly, or that the data it provides is invalid. The current search is aborted.
+The error message contains specific information about the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_RECORDS">
+<term>DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</term>
+<listitem><para>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_TTL_MISMATCH">
+<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
+<listitem><para>
+The datasource backend provided resource records for the given RRset with
+different TTL values. The TTL of the RRset is set to the lowest value, which
+is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught general exception while reading data from a datasource.
+This most likely points to a logic error in the code, and can be considered a
+bug. The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR">
+<term>DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</term>
+<listitem><para>
+There was an uncaught ISC exception while reading data from a datasource. This
+most likely points to a logic error in the code, and can be considered a bug.
+The current search is aborted. Specific information about the exception is
+printed in this error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION">
+<term>DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a delegation to a
+different zone at the given domain name. It will return the delegation instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION_EXACT">
+<term>DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</term>
+<listitem><para>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DNAME">
+<term>DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program encountered a DNAME redirection to a
+different place in the domain space at the given domain name. It will return
+the DNAME instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
+<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXRRSET">
+<term>DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_RRSET">
+<term>DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_DO_QUERY">
<term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
<listitem><para>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
</para></listitem>
</varlistentry>
@@ -317,8 +1889,9 @@ Debug information. An RRset is being added to the in-memory data source.
<varlistentry id="DATASRC_MEM_ADD_WILDCARD">
<term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
<listitem><para>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
</para></listitem>
</varlistentry>
@@ -349,7 +1922,7 @@ returning the CNAME instead.
<term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
<listitem><para>
This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
</para></listitem>
</varlistentry>
@@ -401,11 +1974,11 @@ Debug information. A DNAME was found instead of the requested information.
</varlistentry>
<varlistentry id="DATASRC_MEM_DNAME_NS">
-<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
<listitem><para>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
</para></listitem>
</varlistentry>
@@ -457,8 +2030,8 @@ Debug information. The content of master file is being loaded into the memory.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_MEM_NOTFOUND">
-<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<varlistentry id="DATASRC_MEM_NOT_FOUND">
+<term>DATASRC_MEM_NOT_FOUND requested domain '%1' not found</term>
<listitem><para>
Debug information. The requested domain does not exist.
</para></listitem>
@@ -544,7 +2117,7 @@ behaviour is specified by RFC 1034, section 4.3.3
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
-<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load DNAME records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -554,7 +2127,7 @@ different tools.
</varlistentry>
<varlistentry id="DATASRC_MEM_WILDCARD_NS">
-<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
<listitem><para>
The software refuses to load NS records into a wildcard domain. It isn't
explicitly forbidden, but the protocol is ambiguous about how this should
@@ -566,15 +2139,15 @@ different tools.
<varlistentry id="DATASRC_META_ADD">
<term>DATASRC_META_ADD adding a data source into meta data source</term>
<listitem><para>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
<term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
<listitem><para>
-It was attempted to add a data source into a meta data source. But their
+An attempt was made to add a data source into a meta data source, but their
+classes do not match.
</para></listitem>
</varlistentry>
@@ -634,7 +2207,7 @@ information for it.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CACHED">
-<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</term>
<listitem><para>
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
@@ -642,7 +2215,7 @@ no query is sent to the real data source.
</varlistentry>
<varlistentry id="DATASRC_QUERY_CHECK_CACHE">
-<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<term>DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</term>
<listitem><para>
Debug information. While processing a query, lookup to the hotspot cache
is being made.
@@ -666,12 +2239,11 @@ way down to the given domain.
</varlistentry>
<varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
-<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
<listitem><para>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
</para></listitem>
</varlistentry>
@@ -687,15 +2259,15 @@ DNAME is empty (it has no records). This indicates problem with supplied data.
<term>DATASRC_QUERY_FAIL query failed</term>
<listitem><para>
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
<term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
<listitem><para>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
</para></listitem>
</varlistentry>
@@ -744,14 +2316,14 @@ Debug information. The last DO_QUERY is an auth query.
<varlistentry id="DATASRC_QUERY_IS_GLUE">
<term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_QUERY_IS_NOGLUE">
<term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
</para></listitem>
</varlistentry>
@@ -759,7 +2331,7 @@ glue.
<varlistentry id="DATASRC_QUERY_IS_REF">
<term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
<listitem><para>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
</para></listitem>
</varlistentry>
@@ -806,7 +2378,7 @@ error already.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
-<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
@@ -814,7 +2386,7 @@ for consistency reasons.
</varlistentry>
<varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
-<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
<listitem><para>
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
@@ -852,8 +2424,8 @@ Debug information. A sure query is being processed now.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
-<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<varlistentry id="DATASRC_QUERY_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</term>
<listitem><para>
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
@@ -890,9 +2462,9 @@ error already.
<varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
<term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
<listitem><para>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
</para></listitem>
</varlistentry>
@@ -905,7 +2477,7 @@ already. The code is 1 for error, 2 for not implemented.
</varlistentry>
<varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
-<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
<listitem><para>
A CNAME led to another CNAME and it led to another, and so on. After 16
CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
@@ -938,8 +2510,8 @@ exact kind was hopefully already reported.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
-<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</term>
<listitem><para>
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
@@ -961,32 +2533,53 @@ Debug information. The SQLite data source is closing the database file.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_CONNCLOSE">
+<term>DATASRC_SQLITE_CONNCLOSE Closing sqlite database</term>
+<listitem><para>
+The database file is no longer needed and is being closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNOPEN">
+<term>DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</term>
+<listitem><para>
+The database file is being opened so it can start providing data.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_CREATE">
-<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
<listitem><para>
Debug information. An instance of SQLite data source is being created.
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_DESTROY">
-<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
<listitem><para>
Debug information. An instance of SQLite data source is being destroyed.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_DROPCONN">
+<term>DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</term>
+<listitem><para>
+The object around a database connection is being destroyed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_ENCLOSURE">
<term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
<listitem><para>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
-<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</term>
<listitem><para>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
</para></listitem>
</varlistentry>
@@ -1050,7 +2643,7 @@ a referral and where it goes.
<varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
<term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
<listitem><para>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
it contains different class than the query was for.
</para></listitem>
</varlistentry>
@@ -1079,6 +2672,13 @@ But it doesn't contain that zone.
</para></listitem>
</varlistentry>
+<varlistentry id="DATASRC_SQLITE_NEWCONN">
+<term>DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</term>
+<listitem><para>
+A wrapper object to hold database connection is being initialized.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DATASRC_SQLITE_OPEN">
<term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
<listitem><para>
@@ -1090,15 +2690,22 @@ the provided file.
<varlistentry id="DATASRC_SQLITE_PREVIOUS">
<term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
<listitem><para>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
</para></listitem>
</varlistentry>
<varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
<term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
<listitem><para>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
</para></listitem>
</varlistentry>
@@ -1111,11 +2718,11 @@ no data, but it will be ready for use.
</para></listitem>
</varlistentry>
-<varlistentry id="DATASRC_STATIC_BAD_CLASS">
-<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<varlistentry id="DATASRC_STATIC_CLASS_NOT_CH">
+<term>DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</term>
<listitem><para>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+An error message indicating that a query requesting a RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
</para></listitem>
</varlistentry>
@@ -1143,294 +2750,436 @@ generated.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_ABOVEDBGMAX">
-<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BADDEBUG">
-<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
<listitem><para>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form. In particular,
-it starts DEBUG but does not end with an integer.
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22). The appearance of this message indicates
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="LOGIMPL_BELOWDBGMIN">
-<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
<listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value. The appearance of this message may indicate
+a programming error - please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADDESTINATION">
-<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
<listitem><para>
A logger destination value was given that was not recognized. The
destination should be one of "console", "file", or "syslog".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSEVERITY">
-<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
<listitem><para>
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_BADSTREAM">
-<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
<listitem><para>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPLNS">
-<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
<listitem><para>
-When reading a message file, more than one $NAMESPACE directive was found. In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_DUPMSGID">
-<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
<listitem><para>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules. (All message
-IDs should be unique throughout BIND10.) This has no impact on the operation
-of the server other that erroneous messages may be logged. (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded. However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_IDNOTFND">
-<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for
+the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
+<listitem><para>
+An invalid message identification (ID) has been found during the read of
+a message file. Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed. This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<listitem><para>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message. This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
<listitem><para>
During start-up a local message file was read. A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary. Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary. This message
+may appear a number of times in the file, once for every such unknown
+message identification.
</para><para>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
+There may be several reasons why this message may appear:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
+</para><para>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</para><para>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
+<listitem><para>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_INVMSGID">
-<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
<listitem><para>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGID">
-<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit). A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NOMSGTXT">
-<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
<listitem><para>
-Message definition lines are lines starting with a "%". The rest of the line
-should comprise the message ID and text describing the message. This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
+This is an informational message output by BIND 10 when it starts to read
+a local message file. (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSEXTRARG">
-<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
+The specified error was encountered reading from the named message file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSINVARG">
-<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
<listitem><para>
-The $NAMESPACE argument should be a valid C++ namespace. The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct. The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_NSNOARG">
-<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
<listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed. This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
+The specified error was encountered by the message compiler when writing
+to the named output file.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENIN">
-<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
+<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified input message file for the
-reason given.
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_OPENOUT">
-<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_OPCODE">
+<term>NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</term>
<listitem><para>
-The program was not able to open the specified output file for the reason
-given.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFEXTRARG">
-<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QID">
+<term>NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created. This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_PRFINVARG">
-<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QUERY_NAME">
+<term>NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</term>
<listitem><para>
-The $PREFIX argument is used in a symbol name in a C++ header file. As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_RDLOCMES">
-<term>MSG_RDLOCMES reading local message file %1</term>
+<varlistentry id="NOTIFY_OUT_REPLY_QR_NOT_SET">
+<term>NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</term>
<listitem><para>
-This is an informational message output by BIND10 when it starts to read a
-local message file. (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_READERR">
-<term>MSG_READERR error reading from message file %1: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION">
+<term>NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
<listitem><para>
-The specified error was encountered reading from the named message file.
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_RETRY_EXCEEDED">
+<term>NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</term>
+<listitem><para>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SENDING_NOTIFY">
+<term>NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</term>
+<listitem><para>
+A notify message is sent to the secondary nameserver at the given
+address.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_UNRECDIR">
-<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_ERROR">
+<term>NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</term>
<listitem><para>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="MSG_WRITERR">
-<term>MSG_WRITERR error writing to %1: %2</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_RECV_ERROR">
+<term>NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</term>
<listitem><para>
-The specified error was encountered by the message compiler when writing to
-the named output file.
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPSTR">
-<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<varlistentry id="NOTIFY_OUT_TIMEOUT">
+<term>NOTIFY_OUT_TIMEOUT retry notify to %1#%2</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for a RR for the
-specified nameserver but received an invalid response. Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_INVRESPTC">
-<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
<listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver. The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPCANCEL">
-<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
<listitem><para>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_LOOKUPZONE">
-<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
<listitem><para>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSADDR">
-<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPFAIL">
-<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver. This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver. This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_NSLKUPSUCC">
-<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
<listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="NSAS_SETRTT">
-<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
<listitem><para>
A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver. The RTT has been updated using the value given and the new RTT is
-displayed. (The RTT is subject to a calculation that damps out sudden
-changes. As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver. The RTT has been updated using the value given
+and the new RTT is displayed. (The RTT is subject to a calculation that
+damps out sudden changes. As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different given type and class.
+</para><para>
+This message indicates an internal error in the NSAS. Please raise a
+bug report.
</para></listitem>
</varlistentry>
@@ -1460,16 +3209,16 @@ type> tuple in the cache; instead, the deepest delegation found is indicated.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_FOLLOWCNAME">
-<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
<listitem><para>
A debug message, a CNAME response was received and another query is being issued
for the <name, class, type> tuple.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_LONGCHAIN">
-<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
<listitem><para>
A debug message recording that a CNAME response has been received to an upstream
query for the specified question (Previous debug messages will have indicated
@@ -1479,26 +3228,26 @@ is where on CNAME points to another) and so an error is being returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NONSRRSET">
-<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
<listitem><para>
A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral. However, the received message did
+query and was categorized as a referral. However, the received message did
not contain any NS RRsets. This may indicate a programming error in the
response classification code.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NSASLOOK">
-<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
<listitem><para>
A debug message, the RunningQuery object is querying the NSAS for the
nameservers for the specified zone.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_NXDOMRR">
-<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
<listitem><para>
A debug message recording that either a NXDOMAIN or an NXRRSET response has
been received to an upstream query for the specified question. Previous debug
@@ -1514,8 +3263,8 @@ are no retries left, an error will be reported.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_PROTOCOLRTRY">
-<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
<listitem><para>
A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
@@ -1523,33 +3272,16 @@ repeated query, there will be the indicated number of retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RCODERR">
-<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
<listitem><para>
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_REFERRAL">
-<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
-<listitem><para>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question. Previous debug messages will
-have indicated the server to which the question was sent.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_REFERZONE">
-<term>RESLIB_REFERZONE referred to zone %1</term>
-<listitem><para>
-A debug message indicating that the last referral message was to the specified
-zone.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_RESCAFND">
-<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that a RecursiveQuery object found the
the specified <name, class, type> tuple in the cache. The instance number
@@ -1558,8 +3290,8 @@ been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RESCANOTFND">
-<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
<listitem><para>
This is a debug message and indicates that the look in the cache made by the
RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
@@ -1569,6 +3301,23 @@ been called.
</para></listitem>
</varlistentry>
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question. Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESLIB_RESOLVE">
<term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
<listitem><para>
@@ -1579,8 +3328,8 @@ message indicates which of the two resolve() methods has been called.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RRSETFND">
-<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
<listitem><para>
A debug message, indicating that when RecursiveQuery::resolve queried the
cache, a single RRset was found which was put in the answer. The instance
@@ -1596,16 +3345,16 @@ A debug message giving the round-trip time of the last query and response.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCAFND">
-<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object found
the specified <name, class, type> tuple in the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNCALOOK">
-<term>RESLIB_RUNCALOOK looking up up <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</term>
<listitem><para>
This is a debug message and indicates that a RunningQuery object has made
a call to its doLookup() method to look up the specified <name, class, type>
@@ -1613,16 +3362,16 @@ tuple, the first action of which will be to examine the cache.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUFAIL">
-<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
<listitem><para>
A debug message indicating that a RunningQuery's failure callback has been
called because all nameservers for the zone in question are unreachable.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_RUNQUSUCC">
-<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
<listitem><para>
A debug message indicating that a RunningQuery's success callback has been
called because a nameserver has been found, and that a query is being sent
@@ -1630,19 +3379,19 @@ to the specified nameserver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTSERV">
-<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
<listitem><para>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TESTUPSTR">
-<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
<listitem><para>
This is a debug message and should only be seen in unit tests. A query for
the specified <name, class, type> tuple is being sent to a test nameserver
@@ -1653,13 +3402,13 @@ whose address is given in the message.
<varlistentry id="RESLIB_TIMEOUT">
<term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
<listitem><para>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
</para></listitem>
</varlistentry>
-<varlistentry id="RESLIB_TIMEOUTRTRY">
-<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
<listitem><para>
A debug message indicating that the specified query has timed out and that
the resolver is repeating the query to the same nameserver. After this
@@ -1685,308 +3434,374 @@ tuple is being sent to a nameserver whose address is given in the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRTCP">
-<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_AXFRUDP">
-<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
<listitem><para>
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CLTMOSMALL">
-<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGCHAN">
-<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
<listitem><para>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGERR">
-<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
<listitem><para>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGLOAD">
-<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
<listitem><para>
-A debug message, output when the resolver configuration has been successfully
-loaded.
+This is a debug message output when the resolver configuration has been
+successfully loaded.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_CONFIGUPD">
-<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
<listitem><para>
-A debug message, the configuration has been updated with the specified
-information.
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_CREATED">
<term>RESOLVER_CREATED main resolver object created</term>
<listitem><para>
-A debug message, output when the Resolver() object has been created.
+This is a debug message indicating that the main resolver object has
+been created.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGRCVD">
-<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
<listitem><para>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
+This is a debug message from the resolver listing the contents of a
+received DNS message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_DNSMSGSENT">
-<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
<listitem><para>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_FAILED">
<term>RESOLVER_FAILED resolver failed, reason: %1</term>
<listitem><para>
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDADDR">
-<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
<listitem><para>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_FWDQUERY">
-<term>RESOLVER_FWDQUERY processing forward query</term>
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
<listitem><para>
-The received query has passed all checks and is being forwarded to upstream
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_HDRERR">
-<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
<listitem><para>
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
</para></listitem>
</varlistentry>
<varlistentry id="RESOLVER_IXFR">
<term>RESOLVER_IXFR IXFR request received</term>
<listitem><para>
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_LKTMOSMALL">
-<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
<listitem><para>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NFYNOTAUTH">
-<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NORMQUERY">
-<term>RESOLVER_NORMQUERY processing normal query</term>
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
<listitem><para>
-The received query has passed all checks and is being processed by the resolver.
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOROOTADDR">
-<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
<listitem><para>
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
+This debug message is issued when resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTIN">
-<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
<listitem><para>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_NOTONEQUES">
-<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
<listitem><para>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_OPCODEUNS">
-<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
<listitem><para>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PARSEERR">
-<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PRINTMSG">
-<term>RESOLVER_PRINTMSG print message command, aeguments are: %1</term>
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
<listitem><para>
-This message is logged when a "print_message" command is received over the
-command channel.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_PROTERR">
-<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
<listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSETUP">
-<term>RESOLVER_QUSETUP query setup</term>
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
<listitem><para>
-A debug message noting that the resolver is creating a RecursiveQuery object.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUSHUT">
-<term>RESOLVER_QUSHUT query shutdown</term>
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
<listitem><para>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_QUTMOSMALL">
-<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
<listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECURSIVE">
-<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
<listitem><para>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <source IP address>#<source port>.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RECVMSG">
-<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
<listitem><para>
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
+<listitem><para>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_RETRYNEG">
-<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
<listitem><para>
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_ROOTADDR">
-<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
<listitem><para>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SERVICE">
-<term>RESOLVER_SERVICE service object created</term>
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
<listitem><para>
-A debug message, output when the main service object (which handles the
-received queries) is created.
+This debug message is output when the resolver creates the main service
+object (which handles the received queries).
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SETPARAM">
-<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
<listitem><para>
-A debug message, lists the parameters associated with the message. These are:
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
</para><para>
The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache. However,
+there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
</para></listitem>
</varlistentry>
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="RESOLVER_SHUTDOWN">
<term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
<listitem><para>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
</para></listitem>
</varlistentry>
@@ -2005,11 +3820,982 @@ An informational message, this is output when the resolver starts up.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_UNEXRESP">
-<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
+<listitem><para>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESSES_NOT_LIST">
+<term>SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</term>
+<listitem><para>
+This points to an error in configuration. What was supposed to be a list of
+IP address/port pairs isn't a list at all but something else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_FAIL">
+<term>SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</term>
+<listitem><para>
+The server failed to bind to one of the address/port pairs it should listen
+on according to the configuration, for the reason listed in the message
+(usually because that pair is already used by another service or because of
+missing privileges). The server will try to recover and bind the address/port
+pairs it was listening to before (if any).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_MISSING">
+<term>SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_TYPE">
+<term>SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_UNRECOVERABLE">
+<term>SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</term>
+<listitem><para>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</para><para>
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator provides a new working address configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_VALUE">
+<term>SRVCOMM_ADDRESS_VALUE address to set: %1#%2</term>
+<listitem><para>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (e.g. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has a higher debug level.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_DEINIT">
+<term>SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_INIT">
+<term>SRVCOMM_KEYS_INIT initializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_UPDATE">
+<term>SRVCOMM_KEYS_UPDATE updating TSIG keyring</term>
+<listitem><para>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_PORT_RANGE">
+<term>SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</term>
+<listitem><para>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_SET_LISTEN">
+<term>SRVCOMM_SET_LISTEN setting addresses to listen to</term>
+<listitem><para>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_BAD_OPTION_VALUE">
+<term>STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CC_SESSION_ERROR">
+<term>STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING">
+<term>STATHTTPD_CLOSING closing %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING_CC_SESSION">
+<term>STATHTTPD_CLOSING_CC_SESSION stopping cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_HANDLE_CONFIG">
+<term>STATHTTPD_HANDLE_CONFIG reading configuration: %1</term>
+<listitem><para>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_STATUS_COMMAND">
+<term>STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_UNKNOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_ERROR">
+<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_INIT_ERROR">
+<term>STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SHUTDOWN">
+<term>STATHTTPD_SHUTDOWN shutting down</term>
+<listitem><para>
+The stats-httpd daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTED">
+<term>STATHTTPD_STARTED listening on %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTING_CC_SESSION">
+<term>STATHTTPD_STARTING_CC_SESSION starting cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_START_SERVER_INIT_ERROR">
+<term>STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STOPPED_BY_KEYBOARD">
+<term>STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_UNKNOWN_CONFIG_ITEM">
+<term>STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</term>
+<listitem><para>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_BAD_OPTION_VALUE">
+<term>STATS_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats module was called with a bad command-line argument and will
+not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_CC_SESSION_ERROR">
+<term>STATS_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_NEW_CONFIG">
+<term>STATS_RECEIVED_NEW_CONFIG received new configuration: %1</term>
+<listitem><para>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_REMOVE_COMMAND">
+<term>STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</term>
+<listitem><para>
+A remove command for the given name was sent to the stats module, and
+the given statistics value will now be removed. It will not appear in
+statistics reports until it appears in a statistics update from a
+module again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_RESET_COMMAND">
+<term>STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</term>
+<listitem><para>
+The stats module received a command to clear all collected statistics.
+The data is cleared until it receives an update from the modules again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</term>
+<listitem><para>
+The stats module received a command to show all statistics that it has
+collected.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</term>
+<listitem><para>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats module and it will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_STATUS_COMMAND">
+<term>STATS_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_UNKNOWN_COMMAND">
+<term>STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_SEND_REQUEST_BOSS">
+<term>STATS_SEND_REQUEST_BOSS requesting boss to send statistics</term>
+<listitem><para>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
+<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_UNKNOWN_COMMAND_IN_SPEC">
+<term>STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</term>
+<listitem><para>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INTERNAL_FAILURE">
+<term>XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to an internal
+problem in the bind10 python wrapper library.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_FAILURE">
+<term>XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_STARTED">
+<term>XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_TRANSFER_SUCCESS">
+<term>XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</term>
+<listitem><para>
+The AXFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the xfrin daemon starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
+<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
+<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
+<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
+<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_DROPPED">
+<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to the given host.
+This is required by the ACLs. The %1 and %2 represent the zone name and class,
+the %3 and %4 the IP address and port of the peer requesting the transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_REJECTED">
+<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<listitem><para>
+The xfrout process rejected (by REFUSED rcode) a request to transfer the zone
+to the given host. This is because of ACLs. The %1 and %2 represent the zone name and
+class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_CCSESSION_ERROR">
+<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
+<listitem><para>
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_JITTER_TOO_BIG">
+<term>ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</term>
+<listitem><para>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_KEYBOARD_INTERRUPT">
+<term>ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</term>
+<listitem><para>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_LOAD_ZONE">
+<term>ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_MASTER_ADDRESS">
+<term>ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</term>
+<listitem><para>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_SOA">
+<term>ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</term>
+<listitem><para>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The load has
+been abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_TIMER_THREAD">
+<term>ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</term>
+<listitem><para>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_CLASS">
+<term>ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_NAME">
+<term>ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_NOTIFY">
+<term>ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_SHUTDOWN">
+<term>ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_UNKNOWN">
+<term>ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</term>
+<listitem><para>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_FAILED">
+<term>ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_SUCCESS">
+<term>ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_REFRESH_ZONE">
+<term>ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</term>
+<listitem><para>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SELECT_ERROR">
+<term>ZONEMGR_SELECT_ERROR error with select(): %1</term>
+<listitem><para>
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SEND_FAIL">
+<term>ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</term>
+<listitem><para>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_ERROR">
+<term>ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_TIMEOUT">
+<term>ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SHUTDOWN">
+<term>ZONEMGR_SHUTDOWN zone manager has shut down</term>
+<listitem><para>
+A debug message, output when the zone manager has shut down completely.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_STARTING">
+<term>ZONEMGR_STARTING zone manager starting</term>
+<listitem><para>
+A debug message output when the zone manager starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_TIMER_THREAD_RUNNING">
+<term>ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</term>
+<listitem><para>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_FAIL">
+<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_NOTIFIED">
+<term>ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_SUCCESS">
+<term>ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</term>
<listitem><para>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
</para></listitem>
</varlistentry>
</variablelist>
diff --git a/ext/asio/asio/impl/error_code.ipp b/ext/asio/asio/impl/error_code.ipp
index ed37a17..218c09b 100644
--- a/ext/asio/asio/impl/error_code.ipp
+++ b/ext/asio/asio/impl/error_code.ipp
@@ -11,6 +11,9 @@
#ifndef ASIO_IMPL_ERROR_CODE_IPP
#define ASIO_IMPL_ERROR_CODE_IPP
+// strerror() needs <cstring>
+#include <cstring>
+
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 64136c1..4d8ec83 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -50,12 +50,19 @@ b10_auth_SOURCES += command.cc command.h
b10_auth_SOURCES += common.h common.cc
b10_auth_SOURCES += statistics.cc statistics.h
b10_auth_SOURCES += main.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
EXTRA_DIST += auth_messages.mes
b10_auth_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
}
]
}
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+        "item_title": "Queries TCP",
+        "item_description": "The total number of queries received over TCP by all auth servers since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+        "item_description": "The total number of queries received over UDP by all auth servers since they started initially"
+ }
]
}
}
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index 2943cb5..d684c68 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -107,7 +107,7 @@ DatasourcesConfig::commit() {
// server implementation details, and isn't scalable wrt the number of
// data source types, and should eventually be improved.
// Currently memory data source for class IN is the only possibility.
- server_.setMemoryDataSrc(RRClass::IN(), AuthSrv::MemoryDataSrcPtr());
+ server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr());
BOOST_FOREACH(shared_ptr<AuthConfigParser> datasrc_config, datasources_) {
datasrc_config->commit();
@@ -125,12 +125,12 @@ public:
{}
virtual void build(ConstElementPtr config_value);
virtual void commit() {
- server_.setMemoryDataSrc(rrclass_, memory_datasrc_);
+ server_.setInMemoryClient(rrclass_, memory_client_);
}
private:
AuthSrv& server_;
RRClass rrclass_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ AuthSrv::InMemoryClientPtr memory_client_;
};
void
@@ -143,8 +143,8 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
// We'd eventually optimize building zones (in case of reloading) by
// selectively loading fresh zones. Right now we simply check the
// RR class is supported by the server implementation.
- server_.getMemoryDataSrc(rrclass_);
- memory_datasrc_ = AuthSrv::MemoryDataSrcPtr(new MemoryDataSrc());
+ server_.getInMemoryClient(rrclass_);
+ memory_client_ = AuthSrv::InMemoryClientPtr(new InMemoryClient());
ConstElementPtr zones_config = config_value->get("zones");
if (!zones_config) {
@@ -163,9 +163,10 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
isc_throw(AuthConfigError, "Missing zone file for zone: "
<< origin->str());
}
- shared_ptr<MemoryZone> new_zone(new MemoryZone(rrclass_,
+ shared_ptr<InMemoryZoneFinder> zone_finder(new
+ InMemoryZoneFinder(rrclass_,
Name(origin->stringValue())));
- const result::Result result = memory_datasrc_->addZone(new_zone);
+ const result::Result result = memory_client_->addZone(zone_finder);
if (result == result::EXIST) {
isc_throw(AuthConfigError, "zone "<< origin->str()
<< " already exists");
@@ -177,7 +178,7 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
* need the load method to be split into some kind of build and
* commit/abort parts.
*/
- new_zone->load(file->stringValue());
+ zone_finder->load(file->stringValue());
}
}
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index 2bb402c..1ffa687 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -63,7 +63,7 @@ datebase data source, listing the file that is being accessed.
% AUTH_DNS_SERVICES_CREATED DNS services created
This is a debug message indicating that the component that will handling
-incoming queries for the authoritiative server (DNSServices) has been
+incoming queries for the authoritative server (DNSServices) has been
successfully created. It is issued during server startup is an indication
that the initialization is proceeding normally.
@@ -74,7 +74,7 @@ reason for the failure is given in the message.) The server will drop the
packet.
% AUTH_LOAD_TSIG loading TSIG keys
-This is a debug message indicating that the authoritiative server
+This is a debug message indicating that the authoritative server
has requested the keyring holding TSIG keys from the configuration
database. It is issued during server startup is an indication that the
initialization is proceeding normally.
@@ -141,8 +141,8 @@ encountered an internal error whilst processing a received packet:
the cause of the error is included in the message.
The server will return a SERVFAIL error code to the sender of the packet.
-However, this message indicates a potential error in the server.
-Please open a bug ticket for this issue.
+This message indicates a potential error in the server. Please open a
+bug ticket for this issue.
% AUTH_RECEIVED_COMMAND command '%1' received
This is a debug message issued when the authoritative server has received
@@ -209,7 +209,7 @@ channel. It is issued during server startup is an indication that the
initialization is proceeding normally.
% AUTH_STATS_COMMS communication error in sending statistics data: %1
-An error was encountered when the authoritiative server tried to send data
+An error was encountered when the authoritative server tried to send data
to the statistics daemon. The message includes additional information
describing the reason for the failure.
@@ -257,4 +257,7 @@ request. The zone manager component has been informed of the request,
but has returned an error response (which is included in the message). The
NOTIFY request will not be honored.
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index f29fd05..c9dac88 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -108,8 +108,8 @@ public:
AbstractSession* xfrin_session_;
/// In-memory data source. Currently class IN only for simplicity.
- const RRClass memory_datasrc_class_;
- AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+ const RRClass memory_client_class_;
+ AuthSrv::InMemoryClientPtr memory_client_;
/// Hot spot cache
isc::datasrc::HotCache cache_;
@@ -125,6 +125,10 @@ public:
/// The TSIG keyring
const shared_ptr<TSIGKeyRing>* keyring_;
+
+ /// Bind the ModuleSpec object in config_session_ with
+ /// isc:config::ModuleSpec::validateStatistics.
+ void registerStatisticsValidator();
private:
std::string db_file_;
@@ -139,13 +143,16 @@ private:
/// Increment query counter
void incCounter(const int protocol);
+
+ // validateStatistics
+ bool validateStatistics(isc::data::ConstElementPtr data) const;
};
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
AbstractXfroutClient& xfrout_client) :
config_session_(NULL),
xfrin_session_(NULL),
- memory_datasrc_class_(RRClass::IN()),
+ memory_client_class_(RRClass::IN()),
statistics_timer_(io_service_),
counters_(),
keyring_(NULL),
@@ -290,7 +297,7 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
message->toWire(renderer);
}
LOG_DEBUG(auth_logger, DBG_AUTH_MESSAGES, AUTH_SEND_ERROR_RESPONSE)
- .arg(message->toText());
+ .arg(renderer.getLength()).arg(*message);
}
}
@@ -317,6 +324,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
void
AuthSrv::setConfigSession(ModuleCCSession* config_session) {
impl_->config_session_ = config_session;
+ impl_->registerStatisticsValidator();
}
void
@@ -329,34 +337,34 @@ AuthSrv::getConfigSession() const {
return (impl_->config_session_);
}
-AuthSrv::MemoryDataSrcPtr
-AuthSrv::getMemoryDataSrc(const RRClass& rrclass) {
+AuthSrv::InMemoryClientPtr
+AuthSrv::getInMemoryClient(const RRClass& rrclass) {
// XXX: for simplicity, we only support the IN class right now.
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
}
- return (impl_->memory_datasrc_);
+ return (impl_->memory_client_);
}
void
-AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc)
+AuthSrv::setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client)
{
// XXX: see above
- if (rrclass != impl_->memory_datasrc_class_) {
+ if (rrclass != impl_->memory_client_class_) {
isc_throw(InvalidParameter,
"Memory data source is not supported for RR class "
<< rrclass);
- } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+ } else if (!impl_->memory_client_ && memory_client) {
LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
.arg(rrclass);
- } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+ } else if (impl_->memory_client_ && !memory_client) {
LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
.arg(rrclass);
}
- impl_->memory_datasrc_ = memory_datasrc;
+ impl_->memory_client_ = memory_client;
}
uint32_t
@@ -505,10 +513,10 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
// If a memory data source is configured call the separate
// Query::process()
const ConstQuestionPtr question = *message->beginQuestion();
- if (memory_datasrc_ && memory_datasrc_class_ == question->getClass()) {
+ if (memory_client_ && memory_client_class_ == question->getClass()) {
const RRType& qtype = question->getType();
const Name& qname = question->getName();
- auth::Query(*memory_datasrc_, qname, qtype, *message).process();
+ auth::Query(*memory_client_, qname, qtype, *message).process();
} else {
datasrc::Query query(*message, cache_, dnssec_ok);
data_sources_.doQuery(query);
@@ -670,6 +678,22 @@ AuthSrvImpl::incCounter(const int protocol) {
}
}
+void
+AuthSrvImpl::registerStatisticsValidator() {
+ counters_.registerStatisticsValidator(
+ boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+ if (config_session_ == NULL) {
+ return (false);
+ }
+ return (
+ config_session_->getModuleSpec().validateStatistics(
+ data, true));
+}
+
ConstElementPtr
AuthSrvImpl::setDbFile(ConstElementPtr config) {
ConstElementPtr answer = isc::config::createAnswer();
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index 7eede97..f2259a2 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -17,7 +17,7 @@
#include <string>
-// For MemoryDataSrcPtr below. This should be a temporary definition until
+// For InMemoryClientPtr below. This should be a temporary definition until
// we reorganize the data source framework.
#include <boost/shared_ptr.hpp>
@@ -39,7 +39,7 @@
namespace isc {
namespace datasrc {
-class MemoryDataSrc;
+class InMemoryClient;
}
namespace xfr {
class AbstractXfroutClient;
@@ -133,7 +133,7 @@ public:
/// If there is a data source installed, it will be replaced with the
/// new one.
///
- /// In the current implementation, the SQLite data source and MemoryDataSrc
+ /// In the current implementation, the SQLite data source and InMemoryClient
/// are assumed.
/// We can enable memory data source and get the path of SQLite database by
/// the \c config parameter. If we disabled memory data source, the SQLite
@@ -233,16 +233,16 @@ public:
///
void setXfrinSession(isc::cc::AbstractSession* xfrin_session);
- /// A shared pointer type for \c MemoryDataSrc.
+ /// A shared pointer type for \c InMemoryClient.
///
/// This is defined inside the \c AuthSrv class as it's supposed to be
/// a short term interface until we integrate the in-memory and other
/// data source frameworks.
- typedef boost::shared_ptr<isc::datasrc::MemoryDataSrc> MemoryDataSrcPtr;
+ typedef boost::shared_ptr<isc::datasrc::InMemoryClient> InMemoryClientPtr;
- /// An immutable shared pointer type for \c MemoryDataSrc.
- typedef boost::shared_ptr<const isc::datasrc::MemoryDataSrc>
- ConstMemoryDataSrcPtr;
+ /// An immutable shared pointer type for \c InMemoryClient.
+ typedef boost::shared_ptr<const isc::datasrc::InMemoryClient>
+ ConstInMemoryClientPtr;
/// Returns the in-memory data source configured for the \c AuthSrv,
/// if any.
@@ -260,11 +260,11 @@ public:
/// \param rrclass The RR class of the requested in-memory data source.
/// \return A pointer to the in-memory data source, if configured;
/// otherwise NULL.
- MemoryDataSrcPtr getMemoryDataSrc(const isc::dns::RRClass& rrclass);
+ InMemoryClientPtr getInMemoryClient(const isc::dns::RRClass& rrclass);
/// Sets or replaces the in-memory data source of the specified RR class.
///
- /// As noted in \c getMemoryDataSrc(), some RR classes may not be
+ /// As noted in \c getInMemoryClient(), some RR classes may not be
/// supported, in which case an exception of class \c InvalidParameter
/// will be thrown.
/// This method never throws an exception otherwise.
@@ -275,9 +275,9 @@ public:
/// in-memory data source.
///
/// \param rrclass The RR class of the in-memory data source to be set.
- /// \param memory_datasrc A (shared) pointer to \c MemoryDataSrc to be set.
- void setMemoryDataSrc(const isc::dns::RRClass& rrclass,
- MemoryDataSrcPtr memory_datasrc);
+ /// \param memory_datasrc A (shared) pointer to \c InMemoryClient to be set.
+ void setInMemoryClient(const isc::dns::RRClass& rrclass,
+ InMemoryClientPtr memory_client);
/// \brief Set the communication session with Statistics.
///
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
.RE
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIstatistics\-interval\fR
is the timer interval in seconds for
\fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
\fBshutdown\fR
exits
\fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
.SH "FILES"
.PP
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,15 +132,6 @@
</para>
<para>
- <varname>listen_on</varname> is a list of addresses and ports for
- <command>b10-auth</command> to listen on.
- The list items are the <varname>address</varname> string
- and <varname>port</varname> number.
- By default, <command>b10-auth</command> listens on port 53
- on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
- </para>
-
- <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>statistics-interval</varname> is the timer interval
in seconds for <command>b10-auth</command> to share its
statistics information to
@@ -209,6 +209,34 @@
</refsect1>
<refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>auth.queries.tcp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over TCP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>auth.queries.udp</term>
+ <listitem><simpara>Total count of queries received by the
+ <command>b10-auth</command> server over UDP since startup.
+ </simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para>
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index cf3fe4a..53c019f 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -13,10 +13,17 @@ query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
query_bench_SOURCES += ../auth_config.h ../auth_config.cc
query_bench_SOURCES += ../statistics.h ../statistics.cc
query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+query_bench_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+query_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
query_bench_LDADD += $(top_builddir)/src/lib/bench/libbench.la
query_bench_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index fe3d729..940d57b 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -136,19 +136,21 @@ public:
// that doesn't block other server operations.
// TODO: we may (should?) want to check the "last load time" and
// the timestamp of the file and skip loading if the file isn't newer.
- shared_ptr<MemoryZone> newzone(new MemoryZone(oldzone->getClass(),
- oldzone->getOrigin()));
- newzone->load(oldzone->getFileName());
- oldzone->swap(*newzone);
+ shared_ptr<InMemoryZoneFinder> zone_finder(
+ new InMemoryZoneFinder(old_zone_finder->getClass(),
+ old_zone_finder->getOrigin()));
+ zone_finder->load(old_zone_finder->getFileName());
+ old_zone_finder->swap(*zone_finder);
LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
- .arg(newzone->getOrigin()).arg(newzone->getClass());
+ .arg(zone_finder->getOrigin()).arg(zone_finder->getClass());
}
private:
- shared_ptr<MemoryZone> oldzone; // zone to be updated with the new file.
+ // zone finder to be updated with the new file.
+ shared_ptr<InMemoryZoneFinder> old_zone_finder;
// A helper private method to parse and validate command parameters.
- // On success, it sets 'oldzone' to the zone to be updated.
+ // On success, it sets 'old_zone_finder' to the zone to be updated.
// It returns true if everything is okay; and false if the command is
// valid but there's no need for further process.
bool validate(AuthSrv& server, isc::data::ConstElementPtr args) {
@@ -176,7 +178,7 @@ private:
const RRClass zone_class = class_elem ?
RRClass(class_elem->stringValue()) : RRClass::IN();
- AuthSrv::MemoryDataSrcPtr datasrc(server.getMemoryDataSrc(zone_class));
+ AuthSrv::InMemoryClientPtr datasrc(server.getInMemoryClient(zone_class));
if (datasrc == NULL) {
isc_throw(AuthCommandError, "Memory data source is disabled");
}
@@ -188,13 +190,14 @@ private:
const Name origin(origin_elem->stringValue());
// Get the current zone
- const MemoryDataSrc::FindResult result = datasrc->findZone(origin);
+ const InMemoryClient::FindResult result = datasrc->findZone(origin);
if (result.code != result::SUCCESS) {
isc_throw(AuthCommandError, "Zone " << origin <<
" is not found in data source");
}
- oldzone = boost::dynamic_pointer_cast<MemoryZone>(result.zone);
+ old_zone_finder = boost::dynamic_pointer_cast<InMemoryZoneFinder>(
+ result.zone_finder);
return (true);
}
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 323f890..ab6404e 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -19,7 +19,7 @@
#include <dns/rcode.h>
#include <dns/rdataclass.h>
-#include <datasrc/memory_datasrc.h>
+#include <datasrc/client.h>
#include <auth/query.h>
@@ -31,14 +31,14 @@ namespace isc {
namespace auth {
void
-Query::getAdditional(const Zone& zone, const RRset& rrset) const {
+Query::getAdditional(ZoneFinder& zone, const RRset& rrset) const {
RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
const Rdata& rdata(rdata_iterator->getCurrent());
if (rrset.getType() == RRType::NS()) {
// Need to perform the search in the "GLUE OK" mode.
const generic::NS& ns = dynamic_cast<const generic::NS&>(rdata);
- findAddrs(zone, ns.getNSName(), Zone::FIND_GLUE_OK);
+ findAddrs(zone, ns.getNSName(), ZoneFinder::FIND_GLUE_OK);
} else if (rrset.getType() == RRType::MX()) {
const generic::MX& mx(dynamic_cast<const generic::MX&>(rdata));
findAddrs(zone, mx.getMXName());
@@ -47,8 +47,8 @@ Query::getAdditional(const Zone& zone, const RRset& rrset) const {
}
void
-Query::findAddrs(const Zone& zone, const Name& qname,
- const Zone::FindOptions options) const
+Query::findAddrs(ZoneFinder& zone, const Name& qname,
+ const ZoneFinder::FindOptions options) const
{
// Out of zone name
NameComparisonResult result = zone.getOrigin().compare(qname);
@@ -66,30 +66,31 @@ Query::findAddrs(const Zone& zone, const Name& qname,
// Find A rrset
if (qname_ != qname || qtype_ != RRType::A()) {
- Zone::FindResult a_result = zone.find(qname, RRType::A(), NULL,
- options);
- if (a_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult a_result = zone.find(qname, RRType::A(), NULL,
+ options | dnssec_opt_);
+ if (a_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(a_result.rrset));
+ boost::const_pointer_cast<RRset>(a_result.rrset), dnssec_);
}
}
// Find AAAA rrset
if (qname_ != qname || qtype_ != RRType::AAAA()) {
- Zone::FindResult aaaa_result =
- zone.find(qname, RRType::AAAA(), NULL, options);
- if (aaaa_result.code == Zone::SUCCESS) {
+ ZoneFinder::FindResult aaaa_result =
+ zone.find(qname, RRType::AAAA(), NULL, options | dnssec_opt_);
+ if (aaaa_result.code == ZoneFinder::SUCCESS) {
response_.addRRset(Message::SECTION_ADDITIONAL,
- boost::const_pointer_cast<RRset>(aaaa_result.rrset));
+ boost::const_pointer_cast<RRset>(aaaa_result.rrset),
+ dnssec_);
}
}
}
void
-Query::putSOA(const Zone& zone) const {
- Zone::FindResult soa_result(zone.find(zone.getOrigin(),
- RRType::SOA()));
- if (soa_result.code != Zone::SUCCESS) {
+Query::putSOA(ZoneFinder& zone) const {
+ ZoneFinder::FindResult soa_result(zone.find(zone.getOrigin(),
+ RRType::SOA(), NULL, dnssec_opt_));
+ if (soa_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoSOA, "There's no SOA record in zone " <<
zone.getOrigin().toText());
} else {
@@ -99,21 +100,23 @@ Query::putSOA(const Zone& zone) const {
* to insist.
*/
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(soa_result.rrset));
+ boost::const_pointer_cast<RRset>(soa_result.rrset), dnssec_);
}
}
void
-Query::getAuthAdditional(const Zone& zone) const {
+Query::getAuthAdditional(ZoneFinder& zone) const {
// Fill in authority and additional sections.
- Zone::FindResult ns_result = zone.find(zone.getOrigin(), RRType::NS());
+ ZoneFinder::FindResult ns_result = zone.find(zone.getOrigin(),
+ RRType::NS(), NULL,
+ dnssec_opt_);
// zone origin name should have NS records
- if (ns_result.code != Zone::SUCCESS) {
+ if (ns_result.code != ZoneFinder::SUCCESS) {
isc_throw(NoApexNS, "There's no apex NS records in zone " <<
zone.getOrigin().toText());
} else {
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(ns_result.rrset));
+ boost::const_pointer_cast<RRset>(ns_result.rrset), dnssec_);
// Handle additional for authority section
getAdditional(zone, *ns_result.rrset);
}
@@ -125,8 +128,8 @@ Query::process() const {
const bool qtype_is_any = (qtype_ == RRType::ANY());
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
- const MemoryDataSrc::FindResult result =
- memory_datasrc_.findZone(qname_);
+ const DataSourceClient::FindResult result =
+ datasrc_client_.findZone(qname_);
// If we have no matching authoritative zone for the query name, return
// REFUSED. In short, this is to be compatible with BIND 9, but the
@@ -145,14 +148,15 @@ Query::process() const {
while (keep_doing) {
keep_doing = false;
std::auto_ptr<RRsetList> target(qtype_is_any ? new RRsetList : NULL);
- const Zone::FindResult db_result(result.zone->find(qname_, qtype_,
- target.get()));
-
+ const ZoneFinder::FindResult db_result(
+ result.zone_finder->find(qname_, qtype_, target.get(),
+ dnssec_opt_));
switch (db_result.code) {
- case Zone::DNAME: {
+ case ZoneFinder::DNAME: {
// First, put the dname into the answer
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
/*
* Empty DNAME should never get in, as it is impossible to
* create one in master file.
@@ -188,10 +192,10 @@ Query::process() const {
qname_.getLabelCount() -
db_result.rrset->getName().getLabelCount()).
concatenate(dname.getDname())));
- response_.addRRset(Message::SECTION_ANSWER, cname);
+ response_.addRRset(Message::SECTION_ANSWER, cname, dnssec_);
break;
}
- case Zone::CNAME:
+ case ZoneFinder::CNAME:
/*
* We don't do chaining yet. Therefore handling a CNAME is
* mostly the same as handling SUCCESS, but we didn't get
@@ -202,48 +206,59 @@ Query::process() const {
* So, just put it there.
*/
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
break;
- case Zone::SUCCESS:
+ case ZoneFinder::SUCCESS:
if (qtype_is_any) {
// If query type is ANY, insert all RRs under the domain
// into answer section.
BOOST_FOREACH(RRsetPtr rrset, *target) {
- response_.addRRset(Message::SECTION_ANSWER, rrset);
+ response_.addRRset(Message::SECTION_ANSWER, rrset,
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *rrset.get());
+ getAdditional(*result.zone_finder, *rrset.get());
}
} else {
response_.addRRset(Message::SECTION_ANSWER,
- boost::const_pointer_cast<RRset>(db_result.rrset));
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
// Handle additional for answer section
- getAdditional(*result.zone, *db_result.rrset);
+ getAdditional(*result.zone_finder, *db_result.rrset);
}
// If apex NS records haven't been provided in the answer
// section, insert apex NS records into the authority section
// and AAAA/A RRS of each of the NS RDATA into the additional
// section.
- if (qname_ != result.zone->getOrigin() ||
- db_result.code != Zone::SUCCESS ||
+ if (qname_ != result.zone_finder->getOrigin() ||
+ db_result.code != ZoneFinder::SUCCESS ||
(qtype_ != RRType::NS() && !qtype_is_any))
{
- getAuthAdditional(*result.zone);
+ getAuthAdditional(*result.zone_finder);
}
break;
- case Zone::DELEGATION:
+ case ZoneFinder::DELEGATION:
response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
response_.addRRset(Message::SECTION_AUTHORITY,
- boost::const_pointer_cast<RRset>(db_result.rrset));
- getAdditional(*result.zone, *db_result.rrset);
+ boost::const_pointer_cast<RRset>(db_result.rrset),
+ dnssec_);
+ getAdditional(*result.zone_finder, *db_result.rrset);
break;
- case Zone::NXDOMAIN:
+ case ZoneFinder::NXDOMAIN:
// Just empty answer with SOA in authority section
response_.setRcode(Rcode::NXDOMAIN());
- putSOA(*result.zone);
+ putSOA(*result.zone_finder);
break;
- case Zone::NXRRSET:
+ case ZoneFinder::NXRRSET:
// Just empty answer with SOA in authority section
- putSOA(*result.zone);
+ putSOA(*result.zone_finder);
+ break;
+ default:
+ // These are new result codes (WILDCARD and WILDCARD_NXRRSET)
+ // They should not happen from the in-memory backend, and the
+ // database backend isn't used yet.
+ // TODO: Implement before letting the database backends in
+ isc_throw(isc::NotImplemented, "Unknown result code");
break;
}
}
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index e0c6323..0ebbed8 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -26,7 +26,7 @@ class RRset;
}
namespace datasrc {
-class MemoryDataSrc;
+class DataSourceClient;
}
namespace auth {
@@ -36,10 +36,8 @@ namespace auth {
///
/// Many of the design details for this class are still in flux.
/// We'll revisit and update them as we add more functionality, for example:
-/// - memory_datasrc parameter of the constructor. It is a data source that
-/// uses in memory dedicated backend.
/// - as a related point, we may have to pass the RR class of the query.
-/// in the initial implementation the RR class is an attribute of memory
+/// in the initial implementation the RR class is an attribute of
/// datasource and omitted. It's not clear if this assumption holds with
/// generic data sources. On the other hand, it will help keep
/// implementation simpler, and we might rather want to modify the design
@@ -51,7 +49,7 @@ namespace auth {
/// separate attribute setter.
/// - likewise, we'll eventually need to do per zone access control, for which
/// we need querier's information such as its IP address.
-/// - memory_datasrc and response may better be parameters to process() instead
+/// - datasrc_client and response may better be parameters to process() instead
/// of the constructor.
///
/// <b>Note:</b> The class name is intentionally the same as the one used in
@@ -71,7 +69,7 @@ private:
/// Adds a SOA of the zone into the authority zone of response_.
/// Can throw NoSOA.
///
- void putSOA(const isc::datasrc::Zone& zone) const;
+ void putSOA(isc::datasrc::ZoneFinder& zone) const;
/// \brief Look up additional data (i.e., address records for the names
/// included in NS or MX records).
@@ -83,11 +81,11 @@ private:
/// This method may throw a exception because its underlying methods may
/// throw exceptions.
///
- /// \param zone The Zone wherein the additional data to the query is bo be
- /// found.
+ /// \param zone The ZoneFinder through which the additional data for the
+ /// query is to be found.
/// \param rrset The RRset (i.e., NS or MX rrset) which require additional
/// processing.
- void getAdditional(const isc::datasrc::Zone& zone,
+ void getAdditional(isc::datasrc::ZoneFinder& zone,
const isc::dns::RRset& rrset) const;
/// \brief Find address records for a specified name.
@@ -102,18 +100,19 @@ private:
/// The glue records must exactly match the name in the NS RDATA, without
/// CNAME or wildcard processing.
///
- /// \param zone The \c Zone wherein the address records is to be found.
+ /// \param zone The \c ZoneFinder through which the address records is to
+ /// be found.
/// \param qname The name in rrset RDATA.
/// \param options The search options.
- void findAddrs(const isc::datasrc::Zone& zone,
+ void findAddrs(isc::datasrc::ZoneFinder& zone,
const isc::dns::Name& qname,
- const isc::datasrc::Zone::FindOptions options
- = isc::datasrc::Zone::FIND_DEFAULT) const;
+ const isc::datasrc::ZoneFinder::FindOptions options
+ = isc::datasrc::ZoneFinder::FIND_DEFAULT) const;
- /// \brief Look up \c Zone's NS and address records for the NS RDATA
- /// (domain name) for authoritative answer.
+ /// \brief Look up a zone's NS RRset and their address records for an
+ /// authoritative answer.
///
- /// On returning an authoritative answer, insert the \c Zone's NS into the
+ /// On returning an authoritative answer, insert a zone's NS into the
/// authority section and AAAA/A RRs of each of the NS RDATA into the
/// additional section.
///
@@ -126,25 +125,29 @@ private:
/// include AAAA/A RRs under a zone cut in additional section. (BIND 9
/// excludes under-cut RRs; NSD include them.)
///
- /// \param zone The \c Zone wherein the additional data to the query is to
- /// be found.
- void getAuthAdditional(const isc::datasrc::Zone& zone) const;
+ /// \param zone The \c ZoneFinder through which the NS and additional data
+ /// for the query are to be found.
+ void getAuthAdditional(isc::datasrc::ZoneFinder& zone) const;
public:
/// Constructor from query parameters.
///
/// This constructor never throws an exception.
///
- /// \param memory_datasrc The memory datasource wherein the answer to the query is
+ /// \param datasrc_client The datasource wherein the answer to the query is
/// to be found.
/// \param qname The query name
/// \param qtype The RR type of the query
/// \param response The response message to store the answer to the query.
- Query(const isc::datasrc::MemoryDataSrc& memory_datasrc,
+ /// \param dnssec If the answer should include signatures and NSEC/NSEC3 if
+ /// possible.
+ Query(const isc::datasrc::DataSourceClient& datasrc_client,
const isc::dns::Name& qname, const isc::dns::RRType& qtype,
- isc::dns::Message& response) :
- memory_datasrc_(memory_datasrc), qname_(qname), qtype_(qtype),
- response_(response)
+ isc::dns::Message& response, bool dnssec = false) :
+ datasrc_client_(datasrc_client), qname_(qname), qtype_(qtype),
+ response_(response), dnssec_(dnssec),
+ dnssec_opt_(dnssec ? isc::datasrc::ZoneFinder::FIND_DNSSEC :
+ isc::datasrc::ZoneFinder::FIND_DEFAULT)
{}
/// Process the query.
@@ -157,7 +160,7 @@ public:
/// successful search would result in adding a corresponding RRset to
/// the answer section of the response.
///
- /// If no matching zone is found in the memory datasource, the RCODE of
+ /// If no matching zone is found in the datasource, the RCODE of
/// SERVFAIL will be set in the response.
/// <b>Note:</b> this is different from the error code that BIND 9 returns
/// by default when it's configured as an authoritative-only server (and
@@ -208,10 +211,12 @@ public:
};
private:
- const isc::datasrc::MemoryDataSrc& memory_datasrc_;
+ const isc::datasrc::DataSourceClient& datasrc_client_;
const isc::dns::Name& qname_;
const isc::dns::RRType& qtype_;
isc::dns::Message& response_;
+ const bool dnssec_;
+ const isc::datasrc::ZoneFinder::FindOptions dnssec_opt_;
};
}
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 76e5007..e62719f 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -37,11 +37,14 @@ public:
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+ void registerStatisticsValidator
+ (AuthCounters::validator_type validator);
// Currently for testing purpose only
uint64_t getCounter(const AuthCounters::CounterType type) const;
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
+ AuthCounters::validator_type validator_;
};
AuthCountersImpl::AuthCountersImpl() :
@@ -67,16 +70,25 @@ AuthCountersImpl::submitStatistics() const {
}
std::stringstream statistics_string;
statistics_string << "{\"command\": [\"set\","
- << "{ \"stats_data\": "
- << "{ \"auth.queries.udp\": "
+ << "{ \"owner\": \"Auth\","
+ << " \"data\":"
+ << "{ \"queries.udp\": "
<< counters_.at(AuthCounters::COUNTER_UDP_QUERY)
- << ", \"auth.queries.tcp\": "
+ << ", \"queries.tcp\": "
<< counters_.at(AuthCounters::COUNTER_TCP_QUERY)
<< " }"
<< "}"
<< "]}";
isc::data::ConstElementPtr statistics_element =
isc::data::Element::fromJSON(statistics_string);
+ // validate the statistics data before send
+ if (validator_) {
+ if (!validator_(
+ statistics_element->get("command")->get(1)->get("data"))) {
+ LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+ return (false);
+ }
+ }
try {
// group_{send,recv}msg() can throw an exception when encountering
// an error, and group_recvmsg() will throw an exception on timeout.
@@ -105,6 +117,13 @@ AuthCountersImpl::setStatisticsSession
statistics_session_ = statistics_session;
}
+void
+AuthCountersImpl::registerStatisticsValidator
+ (AuthCounters::validator_type validator)
+{
+ validator_ = validator;
+}
+
// Currently for testing purpose only
uint64_t
AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
@@ -139,3 +158,10 @@ uint64_t
AuthCounters::getCounter(const AuthCounters::CounterType type) const {
return (impl_->getCounter(type));
}
+
+void
+AuthCounters::registerStatisticsValidator
+ (AuthCounters::validator_type validator) const
+{
+ return (impl_->registerStatisticsValidator(validator));
+}
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 5bf6436..c930414 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -131,6 +131,26 @@ public:
/// \return the value of the counter specified by \a type.
///
uint64_t getCounter(const AuthCounters::CounterType type) const;
+
+ /// \brief A type of validation function for the specification in
+ /// isc::config::ModuleSpec.
+ ///
+ /// This type might be useful for not only statistics
+ /// specificatoin but also for config_data specification and for
+ /// commnad.
+ ///
+ typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+ validator_type;
+
+ /// \brief Register a function type of the statistics validation
+ /// function for AuthCounters.
+ ///
+ /// This method never throws an exception.
+ ///
+ /// \param validator A function type of the validation of
+ /// statistics specification.
+ ///
+ void registerStatisticsValidator(AuthCounters::validator_type validator) const;
};
#endif // __STATISTICS_H
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index 71520c2..d27386e 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -37,6 +37,13 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += change_user_unittest.cc
run_unittests_SOURCES += statistics_unittest.cc
run_unittests_SOURCES += run_unittests.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
@@ -47,6 +54,7 @@ run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 2b20d65..4698588 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -651,17 +651,17 @@ TEST_F(AuthSrvTest, updateConfigFail) {
QR_FLAG | AA_FLAG, 1, 1, 1, 0);
}
-TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, updateWithInMemoryClient) {
// Test configuring memory data source. Detailed test cases are covered
// in the configuration tests. We only check the AuthSrv interface here.
// By default memory data source isn't enabled
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
updateConfig(&server,
"{\"datasources\": [{\"type\": \"memory\"}]}", true);
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
// The memory data source is empty, should return REFUSED rcode.
createDataFromFile("examplequery_fromWire.wire");
@@ -672,7 +672,7 @@ TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
}
-TEST_F(AuthSrvTest, chQueryWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, chQueryWithInMemoryClient) {
// Configure memory data source for class IN
updateConfig(&server, "{\"datasources\": "
"[{\"class\": \"IN\", \"type\": \"memory\"}]}", true);
diff --git a/src/bin/auth/tests/command_unittest.cc b/src/bin/auth/tests/command_unittest.cc
index 3fdd086..8a82367 100644
--- a/src/bin/auth/tests/command_unittest.cc
+++ b/src/bin/auth/tests/command_unittest.cc
@@ -48,9 +48,9 @@ using namespace isc::datasrc;
using namespace isc::config;
namespace {
-class AuthConmmandTest : public ::testing::Test {
+class AuthCommandTest : public ::testing::Test {
protected:
- AuthConmmandTest() : server(false, xfrout), rcode(-1) {
+ AuthCommandTest() : server(false, xfrout), rcode(-1) {
server.setStatisticsSession(&statistics_session);
}
void checkAnswer(const int expected_code) {
@@ -60,21 +60,20 @@ protected:
MockSession statistics_session;
MockXfroutClient xfrout;
AuthSrv server;
- AuthSrv::ConstMemoryDataSrcPtr memory_datasrc;
ConstElementPtr result;
int rcode;
public:
void stopServer(); // need to be public for boost::bind
};
-TEST_F(AuthConmmandTest, unknownCommand) {
+TEST_F(AuthCommandTest, unknownCommand) {
result = execAuthServerCommand(server, "no_such_command",
ConstElementPtr());
parseAnswer(rcode, result);
EXPECT_EQ(1, rcode);
}
-TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
+TEST_F(AuthCommandTest, DISABLED_unexpectedException) {
// execAuthServerCommand() won't catch standard exceptions.
// Skip this test for now: ModuleCCSession doesn't seem to validate
// commands.
@@ -83,7 +82,7 @@ TEST_F(AuthConmmandTest, DISABLED_unexpectedException) {
runtime_error);
}
-TEST_F(AuthConmmandTest, sendStatistics) {
+TEST_F(AuthCommandTest, sendStatistics) {
result = execAuthServerCommand(server, "sendstats", ConstElementPtr());
// Just check some message has been sent. Detailed tests specific to
// statistics are done in its own tests.
@@ -92,15 +91,15 @@ TEST_F(AuthConmmandTest, sendStatistics) {
}
void
-AuthConmmandTest::stopServer() {
+AuthCommandTest::stopServer() {
result = execAuthServerCommand(server, "shutdown", ConstElementPtr());
parseAnswer(rcode, result);
assert(rcode == 0); // make sure the test stops when something is wrong
}
-TEST_F(AuthConmmandTest, shutdown) {
+TEST_F(AuthCommandTest, shutdown) {
isc::asiolink::IntervalTimer itimer(server.getIOService());
- itimer.setup(boost::bind(&AuthConmmandTest::stopServer, this), 1);
+ itimer.setup(boost::bind(&AuthCommandTest::stopServer, this), 1);
server.getIOService().run();
EXPECT_EQ(0, rcode);
}
@@ -110,18 +109,18 @@ TEST_F(AuthConmmandTest, shutdown) {
// zones, and checks the zones are correctly loaded.
void
zoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
@@ -147,25 +146,25 @@ configureZones(AuthSrv& server) {
void
newZoneChecks(AuthSrv& server) {
- EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::A()).code);
// now test1.example should have ns/AAAA
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test1.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test1.example")).zone_finder->
find(Name("ns.test1.example"), RRType::AAAA()).code);
// test2.example shouldn't change
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::A()).code);
- EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
- findZone(Name("ns.test2.example")).zone->
+ EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+ findZone(Name("ns.test2.example")).zone_finder->
find(Name("ns.test2.example"), RRType::AAAA()).code);
}
-TEST_F(AuthConmmandTest, loadZone) {
+TEST_F(AuthCommandTest, loadZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -182,7 +181,7 @@ TEST_F(AuthConmmandTest, loadZone) {
newZoneChecks(server);
}
-TEST_F(AuthConmmandTest, loadBrokenZone) {
+TEST_F(AuthCommandTest, loadBrokenZone) {
configureZones(server);
ASSERT_EQ(0, system(INSTALL_PROG " " TEST_DATA_DIR
@@ -195,7 +194,7 @@ TEST_F(AuthConmmandTest, loadBrokenZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadUnreadableZone) {
+TEST_F(AuthCommandTest, loadUnreadableZone) {
configureZones(server);
// install the zone file as unreadable
@@ -209,7 +208,7 @@ TEST_F(AuthConmmandTest, loadUnreadableZone) {
zoneChecks(server); // zone shouldn't be replaced
}
-TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
+TEST_F(AuthCommandTest, loadZoneWithoutDataSrc) {
// try to execute load command without configuring the zone beforehand.
// it should fail.
result = execAuthServerCommand(server, "loadzone",
@@ -218,7 +217,7 @@ TEST_F(AuthConmmandTest, loadZoneWithoutDataSrc) {
checkAnswer(1);
}
-TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
+TEST_F(AuthCommandTest, loadSqlite3DataSrc) {
// For sqlite3 data source we don't have to do anything (the data source
// (re)loads itself automatically)
result = execAuthServerCommand(server, "loadzone",
@@ -228,7 +227,7 @@ TEST_F(AuthConmmandTest, loadSqlite3DataSrc) {
checkAnswer(0);
}
-TEST_F(AuthConmmandTest, loadZoneInvalidParams) {
+TEST_F(AuthCommandTest, loadZoneInvalidParams) {
configureZones(server);
// null arg
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 0890c55..dadb0ee 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -57,12 +57,12 @@ protected:
TEST_F(AuthConfigTest, datasourceConfig) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
configureAuthServer(server, Element::fromJSON(
"{\"datasources\": [{\"type\": \"memory\"}]}"));
// after successful configuration, we should have one (with empty zoneset).
- ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(AuthConfigTest, databaseConfig) {
@@ -82,7 +82,7 @@ TEST_F(AuthConfigTest, versionConfig) {
}
TEST_F(AuthConfigTest, exceptionGuarantee) {
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
// This configuration contains an invalid item, which will trigger
// an exception.
EXPECT_THROW(configureAuthServer(
@@ -92,7 +92,7 @@ TEST_F(AuthConfigTest, exceptionGuarantee) {
" \"no_such_config_var\": 1}")),
AuthConfigError);
// The server state shouldn't change
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(AuthConfigTest, exceptionConversion) {
@@ -154,22 +154,22 @@ protected:
TEST_F(MemoryDatasrcConfigTest, addZeroDataSrc) {
parser->build(Element::fromJSON("[]"));
parser->commit();
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, addEmpty) {
// By default, we don't have any in-memory data source.
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
parser->build(Element::fromJSON("[{\"type\": \"memory\"}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addZeroZone) {
parser->build(Element::fromJSON("[{\"type\": \"memory\","
" \"zones\": []}]"));
parser->commit();
- EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, addOneZone) {
@@ -179,10 +179,10 @@ TEST_F(MemoryDatasrcConfigTest, addOneZone) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
// Check it actually loaded something
- EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(rrclass)->findZone(
- Name("ns.example.com.")).zone->find(Name("ns.example.com."),
+ EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(rrclass)->findZone(
+ Name("ns.example.com.")).zone_finder->find(Name("ns.example.com."),
RRType::A()).code);
}
@@ -199,7 +199,7 @@ TEST_F(MemoryDatasrcConfigTest, addMultiZones) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(3, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(3, server.getInMemoryClient(rrclass)->getZoneCount());
}
TEST_F(MemoryDatasrcConfigTest, replace) {
@@ -209,9 +209,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and install a new set of configuration. It
@@ -227,9 +227,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
" \"file\": \"" TEST_DATA_DIR
"/example.net.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(2, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(2, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::NOTFOUND,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -241,9 +241,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
// create a new parser, and try to load something. It will throw,
@@ -262,9 +262,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
// commit it
// The original should be untouched
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
EXPECT_EQ(isc::datasrc::result::SUCCESS,
- server.getMemoryDataSrc(rrclass)->findZone(
+ server.getInMemoryClient(rrclass)->findZone(
Name("example.com")).code);
}
@@ -275,13 +275,13 @@ TEST_F(MemoryDatasrcConfigTest, remove) {
" \"file\": \"" TEST_DATA_DIR
"/example.zone\"}]}]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
delete parser;
parser = createAuthConfigParser(server, "datasources");
EXPECT_NO_THROW(parser->build(Element::fromJSON("[]")));
EXPECT_NO_THROW(parser->commit());
- EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+ EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index c68b672..b2d1094 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -93,9 +93,9 @@ const char* const other_zone_rrs =
"mx.delegation.example.com. 3600 IN A 192.0.2.100\n";
// This is a mock Zone class for testing.
-// It is a derived class of Zone for the convenient of tests.
+// It is a derived class of ZoneFinder for the convenient of tests.
// Its find() method emulates the common behavior of protocol compliant
-// zone classes, but simplifies some minor cases and also supports broken
+// ZoneFinder classes, but simplifies some minor cases and also supports broken
// behavior.
// For simplicity, most names are assumed to be "in zone"; there's only
// one zone cut at the point of name "delegation.example.com".
@@ -103,15 +103,16 @@ const char* const other_zone_rrs =
// will result in DNAME.
// This mock zone doesn't handle empty non terminal nodes (if we need to test
// such cases find() should have specialized code for it).
-class MockZone : public Zone {
+class MockZoneFinder : public ZoneFinder {
public:
- MockZone() :
+ MockZoneFinder() :
origin_(Name("example.com")),
delegation_name_("delegation.example.com"),
dname_name_("dname.example.com"),
has_SOA_(true),
has_apex_NS_(true),
- rrclass_(RRClass::IN())
+ rrclass_(RRClass::IN()),
+ include_rrsig_anyway_(false)
{
stringstream zone_stream;
zone_stream << soa_txt << zone_ns_txt << ns_addrs_txt <<
@@ -120,14 +121,14 @@ public:
other_zone_rrs;
masterLoad(zone_stream, origin_, rrclass_,
- boost::bind(&MockZone::loadRRset, this, _1));
+ boost::bind(&MockZoneFinder::loadRRset, this, _1));
}
- virtual const isc::dns::Name& getOrigin() const { return (origin_); }
- virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+ virtual isc::dns::Name getOrigin() const { return (origin_); }
+ virtual isc::dns::RRClass getClass() const { return (rrclass_); }
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
// If false is passed, it makes the zone broken as if it didn't have the
// SOA.
@@ -137,11 +138,18 @@ public:
// the apex NS.
void setApexNSFlag(bool on) { has_apex_NS_ = on; }
+ // Turn this on if you want it to return RRSIGs regardless of FIND_GLUE_OK
+ void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+ }
+
private:
typedef map<RRType, ConstRRsetPtr> RRsetStore;
typedef map<Name, RRsetStore> Domains;
Domains domains_;
- void loadRRset(ConstRRsetPtr rrset) {
+ void loadRRset(RRsetPtr rrset) {
domains_[rrset->getName()][rrset->getType()] = rrset;
if (rrset->getName() == delegation_name_ &&
rrset->getType() == RRType::NS()) {
@@ -149,6 +157,26 @@ private:
} else if (rrset->getName() == dname_name_ &&
rrset->getType() == RRType::DNAME()) {
dname_rrset_ = rrset;
+ // Add some signatures
+ } else if (rrset->getName() == Name("example.com.") &&
+ rrset->getType() == RRType::NS()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("NS 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::A()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("A 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
+ } else if (rrset->getType() == RRType::AAAA()) {
+ rrset->addRRsig(RdataPtr(new generic::RRSIG("AAAA 5 3 3600 "
+ "20000101000000 "
+ "20000201000000 "
+ "12345 example.com. "
+ "FAKEFAKEFAKE")));
}
}
@@ -161,11 +189,12 @@ private:
ConstRRsetPtr delegation_rrset_;
ConstRRsetPtr dname_rrset_;
const RRClass rrclass_;
+ bool include_rrsig_anyway_;
};
-Zone::FindResult
-MockZone::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ZoneFinder::FindResult
+MockZoneFinder::find(const Name& name, const RRType& type,
+ RRsetList* target, const FindOptions options)
{
// Emulating a broken zone: mandatory apex RRs are missing if specifically
// configured so (which are rare cases).
@@ -195,7 +224,26 @@ MockZone::find(const Name& name, const RRType& type,
RRsetStore::const_iterator found_rrset =
found_domain->second.find(type);
if (found_rrset != found_domain->second.end()) {
- return (FindResult(SUCCESS, found_rrset->second));
+ ConstRRsetPtr rrset;
+ // Strip whatever signature there is in case DNSSEC is not required
+ // Just to make sure the Query asks for it when it is needed
+ if (options & ZoneFinder::FIND_DNSSEC ||
+ include_rrsig_anyway_ ||
+ !found_rrset->second->getRRsig()) {
+ rrset = found_rrset->second;
+ } else {
+ RRsetPtr noconst(new RRset(found_rrset->second->getName(),
+ found_rrset->second->getClass(),
+ found_rrset->second->getType(),
+ found_rrset->second->getTTL()));
+ for (RdataIteratorPtr
+ i(found_rrset->second->getRdataIterator());
+ !i->isLast(); i->next()) {
+ noconst->addRdata(i->getCurrent());
+ }
+ rrset = noconst;
+ }
+ return (FindResult(SUCCESS, rrset));
}
// If not found but we have a target, fill it with all RRsets here
@@ -233,11 +281,15 @@ protected:
response.setRcode(Rcode::NOERROR());
response.setOpcode(Opcode::QUERY());
// create and add a matching zone.
- mock_zone = new MockZone();
- memory_datasrc.addZone(ZonePtr(mock_zone));
+ mock_finder = new MockZoneFinder();
+ memory_client.addZone(ZoneFinderPtr(mock_finder));
}
- MockZone* mock_zone;
- MemoryDataSrc memory_datasrc;
+ MockZoneFinder* mock_finder;
+ // We use InMemoryClient here. We could have some kind of mock client
+ // here, but historically, the Query supported only InMemoryClient
+ // (originally named MemoryDataSrc) and was tested with it, so we keep
+ // it like this for now.
+ InMemoryClient memory_client;
const Name qname;
const RRClass qclass;
const RRType qtype;
@@ -286,24 +338,76 @@ responseCheck(Message& response, const isc::dns::Rcode& rcode,
TEST_F(QueryTest, noZone) {
// There's no zone in the memory datasource. So the response should have
// REFUSED.
- MemoryDataSrc empty_memory_datasrc;
- Query nozone_query(empty_memory_datasrc, qname, qtype, response);
+ InMemoryClient empty_memory_client;
+ Query nozone_query(empty_memory_client, qname, qtype, response);
EXPECT_NO_THROW(nozone_query.process());
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
TEST_F(QueryTest, exactMatch) {
- Query query(memory_datasrc, qname, qtype, response);
+ Query query(memory_client, qname, qtype, response);
EXPECT_NO_THROW(query.process());
// find match rrset
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
www_a_txt, zone_ns_txt, ns_addrs_txt);
}
+TEST_F(QueryTest, exactMatchIgnoreSIG) {
+ // Check that we do not include the RRSIG when not requested even when
+ // we receive it from the data source.
+ mock_finder->setIncludeRRSIGAnyway(true);
+ Query query(memory_client, qname, qtype, response);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
+ www_a_txt, zone_ns_txt, ns_addrs_txt);
+}
+
+TEST_F(QueryTest, dnssecPositive) {
+ // Just like exactMatch, but the signatures should be included as well
+ Query query(memory_client, qname, qtype, response, true);
+ EXPECT_NO_THROW(query.process());
+ // find match rrset
+ // We can't let responseCheck to check the additional section as well,
+ // it gets confused by the two RRs for glue.delegation.../RRSIG due
+ // to it's design and fixing it would be hard. Therefore we simply
+ // check manually this one time.
+ responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 4, 6,
+ (www_a_txt + std::string("www.example.com. 3600 IN RRSIG "
+ "A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. "
+ "FAKEFAKEFAKE\n")).c_str(),
+ (zone_ns_txt + std::string("example.com. 3600 IN RRSIG NS 5 "
+ "3 3600 20000101000000 "
+ "20000201000000 12345 "
+ "example.com. FAKEFAKEFAKE\n")).
+ c_str(), NULL);
+ RRsetIterator iterator(response.beginSection(Message::SECTION_ADDITIONAL));
+ const char* additional[] = {
+ "glue.delegation.example.com. 3600 IN A 192.0.2.153\n",
+ "glue.delegation.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n",
+ "glue.delegation.example.com. 3600 IN RRSIG AAAA 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ "noglue.example.com. 3600 IN A 192.0.2.53\n",
+ "noglue.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+ NULL
+ };
+ for (const char** rr(additional); *rr != NULL; ++ rr) {
+ ASSERT_FALSE(iterator ==
+ response.endSection(Message::SECTION_ADDITIONAL));
+ EXPECT_EQ(*rr, (*iterator)->toText());
+ iterator ++;
+ }
+ EXPECT_TRUE(iterator == response.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(QueryTest, exactAddrMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -315,7 +419,7 @@ TEST_F(QueryTest, exactAddrMatch) {
TEST_F(QueryTest, apexNSMatch) {
// find match rrset, omit authority data which has already been provided
// in the answer section from the authority section.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"), RRType::NS(),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"), RRType::NS(),
response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 0, 3,
@@ -326,7 +430,7 @@ TEST_F(QueryTest, apexNSMatch) {
TEST_F(QueryTest, exactAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -339,18 +443,18 @@ TEST_F(QueryTest, exactAnyMatch) {
TEST_F(QueryTest, apexAnyMatch) {
// find match rrset, omit additional data which has already been provided
// in the answer section from the additional.
- EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 0, 3,
"example.com. 3600 IN SOA . . 0 0 0 0 0\n"
"example.com. 3600 IN NS glue.delegation.example.com.\n"
"example.com. 3600 IN NS noglue.example.com.\n"
"example.com. 3600 IN NS example.net.\n",
- NULL, ns_addrs_txt, mock_zone->getOrigin());
+ NULL, ns_addrs_txt, mock_finder->getOrigin());
}
TEST_F(QueryTest, mxANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("mx.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("mx.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
mx_txt, zone_ns_txt,
@@ -358,17 +462,17 @@ TEST_F(QueryTest, mxANYMatch) {
}
TEST_F(QueryTest, glueANYMatch) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
NULL, delegation_txt, ns_addrs_txt);
}
TEST_F(QueryTest, nodomainANY) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"),
RRType::ANY(), response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
// This tests that when we need to look up Zone's apex NS records for
@@ -376,15 +480,15 @@ TEST_F(QueryTest, nodomainANY) {
// throw in that case.
TEST_F(QueryTest, noApexNS) {
// Disable apex NS record
- mock_zone->setApexNSFlag(false);
+ mock_finder->setApexNSFlag(false);
- EXPECT_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+ EXPECT_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
response).process(), Query::NoApexNS);
// We don't look into the response, as it threw
}
TEST_F(QueryTest, delegation) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
qtype, response).process());
responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
@@ -392,18 +496,18 @@ TEST_F(QueryTest, delegation) {
}
TEST_F(QueryTest, nxdomain) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"), qtype,
+ EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
response).process());
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
TEST_F(QueryTest, nxrrset) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("www.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("www.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -412,22 +516,22 @@ TEST_F(QueryTest, nxrrset) {
*/
TEST_F(QueryTest, noSOA) {
// disable zone's SOA RR.
- mock_zone->setSOAFlag(false);
+ mock_finder->setSOAFlag(false);
// The NX Domain
- EXPECT_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"),
qtype, response).process(), Query::NoSOA);
// Of course, we don't look into the response, as it throwed
// NXRRSET
- EXPECT_THROW(Query(memory_datasrc, Name("nxrrset.example.com"),
+ EXPECT_THROW(Query(memory_client, Name("nxrrset.example.com"),
qtype, response).process(), Query::NoSOA);
}
TEST_F(QueryTest, noMatchZone) {
// there's a zone in the memory datasource but it doesn't match the qname.
// should result in REFUSED.
- Query(memory_datasrc, Name("example.org"), qtype, response).process();
+ Query(memory_client, Name("example.org"), qtype, response).process();
EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
}
@@ -438,7 +542,7 @@ TEST_F(QueryTest, noMatchZone) {
* A record, other to unknown out of zone one.
*/
TEST_F(QueryTest, MX) {
- Query(memory_datasrc, Name("mx.example.com"), RRType::MX(),
+ Query(memory_client, Name("mx.example.com"), RRType::MX(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
@@ -452,7 +556,7 @@ TEST_F(QueryTest, MX) {
* This should not trigger the additional processing for the exchange.
*/
TEST_F(QueryTest, MXAlias) {
- Query(memory_datasrc, Name("cnamemx.example.com"), RRType::MX(),
+ Query(memory_client, Name("cnamemx.example.com"), RRType::MX(),
response).process();
// there shouldn't be no additional RRs for the exchanges (we have 3
@@ -472,7 +576,7 @@ TEST_F(QueryTest, MXAlias) {
* returned.
*/
TEST_F(QueryTest, CNAME) {
- Query(memory_datasrc, Name("cname.example.com"), RRType::A(),
+ Query(memory_client, Name("cname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -482,7 +586,7 @@ TEST_F(QueryTest, CNAME) {
TEST_F(QueryTest, explicitCNAME) {
// same owner name as the CNAME test but explicitly query for CNAME RR.
// expect the same response as we don't provide a full chain yet.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -494,7 +598,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
// note: with chaining, what should be expected is not trivial:
// BIND 9 returns the CNAME in answer and SOA in authority, no additional.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional.
- Query(memory_datasrc, Name("cname.example.com"), RRType::TXT(),
+ Query(memory_client, Name("cname.example.com"), RRType::TXT(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -503,7 +607,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
TEST_F(QueryTest, explicitCNAME_NX_RRSET) {
// same owner name as the NXRRSET test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -517,7 +621,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
// RCODE being NXDOMAIN.
// NSD returns the CNAME, NS in authority, A/AAAA for NS in additional,
// RCODE being NOERROR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::A(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -526,7 +630,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
TEST_F(QueryTest, explicitCNAME_NX_DOMAIN) {
// same owner name as the NXDOMAIN test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnamenxdom.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -542,7 +646,7 @@ TEST_F(QueryTest, CNAME_OUT) {
* Then the same test should be done with .org included there and
* see what it does (depends on what we want to do)
*/
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::A(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -551,7 +655,7 @@ TEST_F(QueryTest, CNAME_OUT) {
TEST_F(QueryTest, explicitCNAME_OUT) {
// same owner name as the OUT test but explicitly query for CNAME RR.
- Query(memory_datasrc, Name("cnameout.example.com"), RRType::CNAME(),
+ Query(memory_client, Name("cnameout.example.com"), RRType::CNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -567,7 +671,7 @@ TEST_F(QueryTest, explicitCNAME_OUT) {
* pointing to NXRRSET and NXDOMAIN cases (similarly as with CNAME).
*/
TEST_F(QueryTest, DNAME) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::A(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -583,7 +687,7 @@ TEST_F(QueryTest, DNAME) {
* DNAME.
*/
TEST_F(QueryTest, DNAME_ANY) {
- Query(memory_datasrc, Name("www.dname.example.com"), RRType::ANY(),
+ Query(memory_client, Name("www.dname.example.com"), RRType::ANY(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -592,7 +696,7 @@ TEST_F(QueryTest, DNAME_ANY) {
// Test when we ask for DNAME explicitly, it does no synthetizing.
TEST_F(QueryTest, explicitDNAME) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::DNAME(),
+ Query(memory_client, Name("dname.example.com"), RRType::DNAME(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -604,7 +708,7 @@ TEST_F(QueryTest, explicitDNAME) {
* the CNAME, it should return the RRset.
*/
TEST_F(QueryTest, DNAME_A) {
- Query(memory_datasrc, Name("dname.example.com"), RRType::A(),
+ Query(memory_client, Name("dname.example.com"), RRType::A(),
response).process();
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -616,11 +720,11 @@ TEST_F(QueryTest, DNAME_A) {
* It should not synthetize the CNAME.
*/
TEST_F(QueryTest, DNAME_NX_RRSET) {
- EXPECT_NO_THROW(Query(memory_datasrc, Name("dname.example.com"),
+ EXPECT_NO_THROW(Query(memory_client, Name("dname.example.com"),
RRType::TXT(), response).process());
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_zone->getOrigin());
+ NULL, soa_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -636,7 +740,7 @@ TEST_F(QueryTest, LongDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
responseCheck(response, Rcode::YXDOMAIN(), AA_FLAG, 1, 0, 0,
@@ -655,7 +759,7 @@ TEST_F(QueryTest, MaxLenDNAME) {
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
"dname.example.com.");
- EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+ EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
response).process());
// Check the answer is OK
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 9a3dded..98e573b 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
#include <cc/data.h>
#include <cc/session.h>
@@ -76,6 +78,13 @@ protected:
}
MockSession statistics_session_;
AuthCounters counters;
+ // no need to be inherited from the original class here.
+ class MockModuleSpec {
+ public:
+ bool validateStatistics(ConstElementPtr, const bool valid) const
+ { return (valid); }
+ };
+ MockModuleSpec module_spec_;
};
void
@@ -181,7 +190,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
statistics_session_.setThrowSessionTimeout(false);
}
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
// Submit statistics data.
// Validate if it submits correct data.
@@ -201,12 +210,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
// Command is "set".
EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
ConstElementPtr statistics_data = statistics_session_.sent_msg
->get("command")->get(1)
- ->get("stats_data");
+ ->get("data");
// UDP query counter is 2 and TCP query counter is 1.
- EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
- EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
}
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+ //a validator for the unittest
+ AuthCounters::validator_type validator;
+ ConstElementPtr el;
+
+ // Submit statistics data with correct statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, true);
+
+ EXPECT_TRUE(validator(el));
+
+ // register validator to AuthCounters
+ counters.registerStatisticsValidator(validator);
+
+ // Counters should be initialized to 0.
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+
+ // UDP query counter is set to 2.
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ // TCP query counter is set to 1.
+ counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+
+ // checks the value returned by submitStatistics
+ EXPECT_TRUE(counters.submitStatistics());
+
+ // Destination is "Stats".
+ EXPECT_EQ("Stats", statistics_session_.msg_destination);
+ // Command is "set".
+ EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+ ->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
+ ConstElementPtr statistics_data = statistics_session_.sent_msg
+ ->get("command")->get(1)
+ ->get("data");
+ // UDP query counter is 2 and TCP query counter is 1.
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+ // Submit statistics data with incorrect statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, false);
+
+ EXPECT_FALSE(validator(el));
+
+ counters.registerStatisticsValidator(validator);
+
+ // checks the value returned by submitStatistics
+ EXPECT_FALSE(counters.submitStatistics());
+}
}
diff --git a/src/bin/auth/tests/testdata/Makefile.am b/src/bin/auth/tests/testdata/Makefile.am
index f6f1f27..c86722f 100644
--- a/src/bin/auth/tests/testdata/Makefile.am
+++ b/src/bin/auth/tests/testdata/Makefile.am
@@ -23,4 +23,4 @@ EXTRA_DIST += example.com
EXTRA_DIST += example.sqlite3
.spec.wire:
- $(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index cca4a53..5ec0c9f 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,16 +1,23 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10.pyc
+CLEANFILES = bind10 bind10_src.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+noinst_SCRIPTS = run_bind10.sh
+
bind10dir = $(pkgdatadir)
bind10_DATA = bob.spec
EXTRA_DIST = bob.spec
man_MANS = bind10.8
-EXTRA_DIST += $(man_MANS) bind10.xml
+EXTRA_DIST += $(man_MANS) bind10.xml bind10_messages.mes
if ENABLE_MAN
@@ -19,10 +26,14 @@ bind10.8: bind10.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10.py
+bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
chmod a+x $@
pytest:
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..1af4f14 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 31, 2011
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -107,6 +107,18 @@ Display more about what is going on for
\fBbind10\fR
and its child processes\&.
.RE
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
.SH "SEE ALSO"
.PP
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
deleted file mode 100755
index 48d641d..0000000
--- a/src/bin/bind10/bind10.py.in
+++ /dev/null
@@ -1,1047 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-
-import isc.cc
-import isc.util.process
-import isc.net.parse
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for bind10.boottime of stats module
-_BASETIME = time.gmtime()
-
-class RestartSchedule:
- """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
- * If a process was been running for >=10 seconds, we restart it
- right away.
- * If a process was running for <10 seconds, we wait until 10 seconds
- after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
- def __init__(self, restart_frequency=10.0):
- self.restart_frequency = restart_frequency
- self.run_start_time = None
- self.run_stop_time = None
- self.restart_time = None
-
- def set_run_start_time(self, when=None):
- if when is None:
- when = time.time()
- self.run_start_time = when
- sigma = self.restart_frequency * 0.05
- self.restart_time = when + random.normalvariate(self.restart_frequency,
- sigma)
-
- def set_run_stop_time(self, when=None):
- """We don't actually do anything with stop time now, but it
- might be useful for future algorithms."""
- if when is None:
- when = time.time()
- self.run_stop_time = when
-
- def get_restart_time(self, when=None):
- if when is None:
- when = time.time()
- return max(when, self.restart_time)
-
-class ProcessInfoError(Exception): pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False, uid=None, username=None):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.restart_schedule = RestartSchedule()
- self.uid = uid
- self.username = username
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everthing down by
- # other means).
- os.setpgrp()
- # Second, set the user ID if one has been specified
- if self.uid is not None:
- try:
- posix.setuid(self.uid)
- except OSError as e:
- if e.errno == errno.EPERM:
- # if we failed to change user due to permission report that
- raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
- else:
- # otherwise simply re-raise whatever error we found
- raise
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = os.environ
- spawn_env.update(self.env)
- if 'B10_FROM_SOURCE' not in os.environ:
- spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
- self.restart_schedule.set_run_start_time()
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, brittle=False):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed trough to config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specify on which port it
- should listen.
- """
- self.cc_session = None
- self.ccs = None
- self.cfg_start_auth = True
- self.cfg_start_resolver = False
- self.cfg_start_dhcp6 = False
- self.cfg_start_dhcp4 = False
- self.started_auth_family = False
- self.started_resolver_family = False
- self.curproc = None
- self.dead_processes = {}
- self.msgq_socket_file = msgq_socket_file
- self.nocache = nocache
- self.processes = {}
- self.expected_shutdowns = {}
- self.runnable = False
- self.uid = setuid
- self.username = username
- self.verbose = verbose
- self.data_path = data_path
- self.config_filename = config_filename
- self.cmdctl_port = cmdctl_port
- self.brittle = brittle
-
- def config_handler(self, new_config):
- # If this is initial update, don't do anything now, leave it to startup
- if not self.runnable:
- return
- # Now we declare few functions used only internally here. Besides the
- # benefit of not polluting the name space, they are closures, so we
- # don't need to pass some variables
- def start_stop(name, started, start, stop):
- if not'start_' + name in new_config:
- return
- if new_config['start_' + name]:
- if not started:
- if self.uid is not None:
- sys.stderr.write("[bind10] Starting " + name + " as " +
- "a user, not root. This might fail.\n")
- start()
- else:
- stop()
- # These four functions are passed to start_stop (smells like functional
- # programming little bit)
- def resolver_on():
- self.start_resolver(self.c_channel_env)
- self.started_resolver_family = True
- def resolver_off():
- self.stop_resolver()
- self.started_resolver_family = False
- def auth_on():
- self.start_auth(self.c_channel_env)
- self.start_xfrout(self.c_channel_env)
- self.start_xfrin(self.c_channel_env)
- self.start_zonemgr(self.c_channel_env)
- self.started_auth_family = True
- def auth_off():
- self.stop_zonemgr()
- self.stop_xfrin()
- self.stop_xfrout()
- self.stop_auth()
- self.started_auth_family = False
-
- # The real code of the config handler function follows here
- if self.verbose:
- sys.stdout.write("[bind10] Handling new configuration: " +
- str(new_config) + "\n")
- start_stop('resolver', self.started_resolver_family, resolver_on,
- resolver_off)
- start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
- answer = isc.config.ccsession.create_answer(0)
- return answer
-
- def get_processes(self):
- pids = list(self.processes.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.processes[pid].name])
- return process_list
-
- def command_handler(self, command, args):
- if self.verbose:
- sys.stdout.write("[bind10] Boss got command: " + str(command) + "\n")
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "sendstats":
- # send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }})
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- self.cc_session.group_recvmsg(True, seq)
- answer = isc.config.ccsession.create_answer(0)
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_processes(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- if self.verbose:
- sys.stdout.write("[bind10] killing started processes:\n")
-
- for pid in self.processes:
- if self.verbose:
- sys.stdout.write("[bind10] - %s\n" % self.processes[pid].name)
- self.processes[pid].process.kill()
- self.processes = {}
-
- def read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- At present these are the components to start although arguably this
- information should be in the configuration for the appropriate
- module itself. (However, this would cause difficulty in the case of
- xfrin/xfrout and zone manager as we don't need to start those if we
- are not running the authoritative server.)
- """
- if self.verbose:
- sys.stdout.write("[bind10] Reading Boss configuration:\n")
-
- config_data = self.ccs.get_full_config()
- self.cfg_start_auth = config_data.get("start_auth")
- self.cfg_start_resolver = config_data.get("start_resolver")
-
- if self.verbose:
- sys.stdout.write("[bind10] - start_auth: %s\n" %
- str(self.cfg_start_auth))
- sys.stdout.write("[bind10] - start_resolver: %s\n" %
- str(self.cfg_start_resolver))
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- verbose option is set. Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if self.verbose:
- sys.stdout.write("[bind10] Starting %s" % self.curproc)
- if port is not None:
- sys.stdout.write(" on port %d" % port)
- if address is not None:
- sys.stdout.write(" (address %s)" % str(address))
- sys.stdout.write("\n")
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with starting_message(), this ensures a consistent
- format.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Started %s" % self.curproc)
- if pid is not None:
- sys.stdout.write(" (PID %d)" % pid)
- sys.stdout.write("\n")
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def start_msgq(self, c_channel_env):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
- True, not self.verbose, uid=self.uid,
- username=self.username)
- c_channel.spawn()
- self.processes[c_channel.pid] = c_channel
- self.log_started(c_channel.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > 5:
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- def start_cfgmgr(self, c_channel_env):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- bind_cfgd = ProcessInfo("b10-cfgmgr", args,
- c_channel_env, uid=self.uid,
- username=self.username)
- bind_cfgd.spawn()
- self.processes[bind_cfgd.pid] = bind_cfgd
- self.log_started(bind_cfgd.pid)
-
- # sleep until b10-cfgmgr is fully up and running, this is a good place
- # to have a (short) timeout on synchronized groupsend/receive
- # TODO: replace the sleep by a listen for ConfigManager started
- # message
- time.sleep(1)
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
- """
- self.log_starting("ccsession")
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler, self.command_handler)
- self.ccs.start()
- self.log_started()
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = ProcessInfo(name, args, c_channel_env)
- newproc.spawn()
- self.processes[newproc.pid] = newproc
- self.log_started(newproc.pid)
-
- def start_simple(self, name, c_channel_env, port=None, address=None):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
-
- The port and address arguments are for log messages only.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- self.start_process(name, args, c_channel_env, port, address)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self, c_channel_env):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.nocache:
- authargs += ['-n']
- if self.uid:
- authargs += ['-u', str(self.uid)]
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- self.start_process("b10-auth", authargs, c_channel_env)
-
- def start_resolver(self, c_channel_env):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.uid:
- resargs += ['-u', str(self.uid)]
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- self.start_process("b10-resolver", resargs, c_channel_env)
-
- def start_xfrout(self, c_channel_env):
- self.start_simple("b10-xfrout", c_channel_env)
-
- def start_xfrin(self, c_channel_env):
- self.start_simple("b10-xfrin", c_channel_env)
-
- def start_zonemgr(self, c_channel_env):
- self.start_simple("b10-zonemgr", c_channel_env)
-
- def start_stats(self, c_channel_env):
- self.start_simple("b10-stats", c_channel_env)
-
- def start_stats_httpd(self, c_channel_env):
- self.start_simple("b10-stats-httpd", c_channel_env)
-
- def start_dhcp6(self, c_channel_env):
- self.start_simple("b10-dhcp6", c_channel_env)
-
- def start_cmdctl(self, c_channel_env):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
-
- def start_all_processes(self):
- """
- Starts up all the processes. Any exception generated during the
- starting of the processes is handled by the caller.
- """
- c_channel_env = self.c_channel_env
- self.start_msgq(c_channel_env)
- self.start_cfgmgr(c_channel_env)
- self.start_ccsession(c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started.
- self.read_bind10_config()
-
- # Continue starting the processes. The authoritative server (if
- # selected):
- if self.cfg_start_auth:
- self.start_auth(c_channel_env)
-
- # ... and resolver (if selected):
- if self.cfg_start_resolver:
- self.start_resolver(c_channel_env)
- self.started_resolver_family = True
-
- # Everything after the main components can run as non-root.
- # TODO: this is only temporary - once the privileged socket creator is
- # fully working, nothing else will run as root.
- if self.uid is not None:
- posix.setuid(self.uid)
-
- # xfrin/xfrout and the zone manager are only meaningful if the
- # authoritative server has been started.
- if self.cfg_start_auth:
- self.start_xfrout(c_channel_env)
- self.start_xfrin(c_channel_env)
- self.start_zonemgr(c_channel_env)
- self.started_auth_family = True
-
- # ... and finally start the remaining processes
- self.start_stats(c_channel_env)
- self.start_stats_httpd(c_channel_env)
- self.start_cmdctl(c_channel_env)
-
- if self.cfg_start_dhcp6:
- self.start_dhcp6(c_channel_env)
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise an string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- if self.verbose:
- sys.stdout.write("[bind10] Checking for already running b10-msgq\n")
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- return "b10-msgq already running, or socket file not cleaned , cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all processes. If any one fails to start, kill all started
- # processes and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_processes()
- except Exception as e:
- self.kill_started_processes()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- return None
-
- def stop_all_processes(self):
- """Stop all processes."""
- cmd = { "command": ['shutdown']}
-
- self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
- self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
- self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
- self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
- self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
- self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
- self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
- self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
- self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
-
- def stop_process(self, process, recipient):
- """
- Stop the given process, friendly-like. The process is the name it has
- (in logs, etc), the recipient is the address on msgq.
- """
- if self.verbose:
- sys.stdout.write("[bind10] Asking %s to terminate\n" % process)
- # TODO: Some timeout to solve processes that don't want to die would
- # help. We can even store it in the dict, it is used only as a set
- self.expected_shutdowns[process] = 1
- # Ask the process to die willingly
- self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
- recipient)
-
- # Series of stop_process wrappers
- def stop_resolver(self):
- self.stop_process('b10-resolver', 'Resolver')
-
- def stop_auth(self):
- self.stop_process('b10-auth', 'Auth')
-
- def stop_xfrout(self):
- self.stop_process('b10-xfrout', 'Xfrout')
-
- def stop_xfrin(self):
- self.stop_process('b10-xfrin', 'Xfrin')
-
- def stop_zonemgr(self):
- self.stop_process('b10-zonemgr', 'Zonemgr')
-
- def shutdown(self):
- """Stop the BoB instance."""
- if self.verbose:
- sys.stdout.write("[bind10] Stopping the server.\n")
- # first try using the BIND 10 request to stop
- try:
- self.stop_all_processes()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometime it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
- # next try sending a SIGTERM
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGTERM to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.terminate()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- # finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.processes:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- processes_to_stop = list(self.processes.values())
- for proc_info in processes_to_stop:
- if self.verbose:
- sys.stdout.write("[bind10] Sending SIGKILL to %s (PID %d).\n" %
- (proc_info.name, proc_info.pid))
- try:
- proc_info.process.kill()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- if self.verbose:
- sys.stdout.write("[bind10] All processes ended, server done.\n")
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD: break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0: break
- if pid in self.processes:
- # One of the processes we know about. Get information on it.
- proc_info = self.processes.pop(pid)
- proc_info.restart_schedule.set_run_stop_time()
- self.dead_processes[proc_info.pid] = proc_info
-
- # Write out message, but only if in the running state:
- # During startup and shutdown, these messages are handled
- # elsewhere.
- if self.runnable:
- if exit_status is None:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) died: exit status not available" %
- (proc_info.name, proc_info.pid))
- else:
- sys.stdout.write(
- "[bind10] Process %s (PID %d) terminated, exit status = %d\n" %
- (proc_info.name, proc_info.pid, exit_status))
-
- # Was it a special process?
- if proc_info.name == "b10-msgq":
- sys.stdout.write(
- "[bind10] The b10-msgq process died, shutting down.\n")
- self.runnable = False
-
- # If we're in 'brittle' mode, we want to shutdown after
- # any process dies.
- if self.brittle:
- self.runnable = False
- else:
- sys.stdout.write("[bind10] Unknown child pid %d exited.\n" % pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
- """
- next_restart = None
- # if we're shutting down, then don't restart
- if not self.runnable:
- return 0
- # otherwise look through each dead process and try to restart
- still_dead = {}
- now = time.time()
- for proc_info in self.dead_processes.values():
- if proc_info.name in self.expected_shutdowns:
- # We don't restart, we wanted it to die
- del self.expected_shutdowns[proc_info.name]
- continue
- restart_time = proc_info.restart_schedule.get_restart_time(now)
- if restart_time > now:
- if (next_restart is None) or (next_restart > restart_time):
- next_restart = restart_time
- still_dead[proc_info.pid] = proc_info
- else:
- if self.verbose:
- sys.stdout.write("[bind10] Resurrecting dead %s process...\n" %
- proc_info.name)
- try:
- proc_info.respawn()
- self.processes[proc_info.pid] = proc_info
- sys.stdout.write("[bind10] Resurrected %s (PID %d)\n" %
- (proc_info.name, proc_info.pid))
- except:
- still_dead[proc_info.pid] = proc_info
- # remember any processes that refuse to be resurrected
- self.dead_processes = still_dead
- # return the time when the next process is ready to be restarted
- return next_restart
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- if options.verbose:
- sys.stdout.write("[bind10] Received %s.\n" % get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
- default=False, help="disable hot-spot cache in authoritative DNS server")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("--brittle", dest="brittle", action="store_true",
- help="debugging flag: exit if any component dies")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
- created by dump_pid(). The specified may or may not exist; if it
- doesn't this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno is not errno.ENOENT:
- raise
-
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Check user ID.
- setuid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- sys.stderr.write("bind10: invalid user: '%s'\n" % options.user)
- sys.exit(1)
-
- # Announce startup.
- if options.verbose:
- sys.stdout.write("%s\n" % VERSION)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache, options.verbose,
- setuid, username, options.cmdctl_port, options.brittle)
- startup_result = boss_of_bind.startup()
- if startup_result:
- sys.stderr.write("[bind10] Error on startup: %s\n" % startup_result)
- sys.exit(1)
- sys.stdout.write("[bind10] BIND 10 started\n")
- dump_pid(options.pid_file)
-
- # In our main loop, we check for dead processes or messages
- # on the c-channel.
- wakeup_fd = wakeup_pipe[0]
- ccs_fd = boss_of_bind.ccs.get_socket().fileno()
- while boss_of_bind.runnable:
- # clean up any processes that exited
- boss_of_bind.reap_children()
- next_restart = boss_of_bind.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- sys.stderr.write("[bind10] Error with select(); %s\n" % err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- boss_of_bind.ccs.check_command()
- except isc.cc.session.ProtocolError:
- if options.verbose:
- sys.stderr.write("[bind10] msgq channel disappeared.\n")
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- sys.stdout.write("[bind10] BIND 10 exiting\n");
- unlink_pid_file(options.pid_file)
- sys.exit(0)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..b101ba8 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 31, 2011</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -217,6 +217,30 @@ The default is the basename of ARG 0.
<!--
TODO: configuration section
-->
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>bind10.boot_time</term>
+ <listitem><para>
+ The date and time that the <command>bind10</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
<!--
<refsect1>
<title>FILES</title>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
new file mode 100644
index 0000000..4debcdb
--- /dev/null
+++ b/src/bin/bind10/bind10_messages.mes
@@ -0,0 +1,204 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+
+% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
+This message shows whether or not the authoritative server should be
+started according to the configuration.
+
+% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
+This message shows whether or not the resolver should be
+started according to the configuration.
+
+% BIND10_INVALID_USER invalid user: %1
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
+% BIND10_KILL_PROCESS killing process %1
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+
+% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+
+% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
+The message bus daemon has died. This is a fatal error, since it may
+leave the system in an inconsistent state. BIND10 will now shut down.
+
+% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+
+% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
+The given process ended unexpectedly, but no exit status is
+available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
+description.
+
+% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
+The given process ended unexpectedly with the given exit status.
+Depending on which module it was, it may simply be restarted, or it
+may be a problem that will cause the boss module to shut down too.
+The latter happens if it was the message bus daemon, which, if it has
+died suddenly, may leave the system in an inconsistent state. BIND10
+will also shut down now if it has been run with --brittle.
+
+% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+
+% BIND10_RECEIVED_COMMAND received command: %1
+The boss module received a command and shall now process it. The command
+is printed.
+
+% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+
+% BIND10_RECEIVED_SIGNAL received signal %1
+The boss module received the given signal.
+
+% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
+The given process has been restarted successfully, and is now running
+with the given process id.
+
+% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
+The given process has ended unexpectedly, and is now restarted.
+
+% BIND10_SELECT_ERROR error in select() call: %1
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+
+% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
+The boss module is sending a SIGKILL signal to the given process.
+
+% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
+The boss module is sending a SIGTERM signal to the given process.
+
+% BIND10_SHUTDOWN stopping the server
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+
+% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
+All child processes have been stopped, and the boss process will now
+stop itself.
+
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_CRASHED the socket creator crashed
+The socket creator terminated unexpectedly. It is not possible to restart it
+(because the boss already gave up root privileges), so the system is going
+to terminate.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The boss module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not usually happen.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The boss module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The boss forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_PROCESS started %1
+The given process has successfully been started.
+
+% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
+The given process has successfully been started, and has the given PID.
+
+% BIND10_STARTING starting BIND10: %1
+Informational message on startup that shows the full version.
+
+% BIND10_STARTING_PROCESS starting process %1
+The boss module is starting the given process.
+
+% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
+The boss module is starting the given process, which will listen on the
+given port number.
+
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+The boss module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+
+% BIND10_STARTUP_COMPLETE BIND 10 started
+All modules have been successfully started, and BIND 10 is now running.
+
+% BIND10_STARTUP_ERROR error during startup: %1
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+
+% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
+The given module is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_STOP_PROCESS asking %1 to shut down
+The boss module is sending a shutdown command to the given module over
+the message channel.
+
+% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the boss module specified
+statistics data that is invalid according to the boss specification file.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
new file mode 100755
index 0000000..1687cb1
--- /dev/null
+++ b/src/bin/bind10/bind10_src.py.in
@@ -0,0 +1,1087 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the Boss of Bind (BoB, or bob) program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the BoB class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+from isc.log_messages.bind10_messages import *
+import isc.bind10.sockcreator
+
+isc.log.init("b10-boss")
+logger = isc.log.Logger("boss")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = 10
+DBG_COMMANDS = 30
+
+# Assign this process some longer name
+isc.util.process.rename(sys.argv[0])
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for boot_time of Boss
+_BASETIME = time.gmtime()
+
+class RestartSchedule:
+ """
+Keeps state when restarting something (in this case, a process).
+
+When a process dies unexpectedly, we need to restart it. However, if
+it fails to restart for some reason, then we should not simply keep
+restarting it at high speed.
+
+A more sophisticated algorithm can be developed, but for now we choose
+a simple set of rules:
+
+ * If a process has been running for >=10 seconds, we restart it
+ right away.
+ * If a process was running for <10 seconds, we wait until 10 seconds
+ after it was started.
+
+To avoid programs getting into lockstep, we use a normal distribution
+to avoid being restarted at exactly 10 seconds."""
+
+ def __init__(self, restart_frequency=10.0):
+ self.restart_frequency = restart_frequency
+ self.run_start_time = None
+ self.run_stop_time = None
+ self.restart_time = None
+
+ def set_run_start_time(self, when=None):
+ if when is None:
+ when = time.time()
+ self.run_start_time = when
+ sigma = self.restart_frequency * 0.05
+ self.restart_time = when + random.normalvariate(self.restart_frequency,
+ sigma)
+
+ def set_run_stop_time(self, when=None):
+ """We don't actually do anything with stop time now, but it
+ might be useful for future algorithms."""
+ if when is None:
+ when = time.time()
+ self.run_stop_time = when
+
+ def get_restart_time(self, when=None):
+ if when is None:
+ when = time.time()
+ return max(when, self.restart_time)
+
+class ProcessInfoError(Exception): pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False, uid=None, username=None):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.restart_schedule = RestartSchedule()
+ self.uid = uid
+ self.username = username
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (the boss will shut everything down by
+ # other means).
+ os.setpgrp()
+ # Second, set the user ID if one has been specified
+ if self.uid is not None:
+ try:
+ posix.setuid(self.uid)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # if we failed to change user due to permission report that
+ raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
+ else:
+ # otherwise simply re-raise whatever error we found
+ raise
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the boss process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = os.environ
+ spawn_env.update(self.env)
+ if 'B10_FROM_SOURCE' not in os.environ:
+ spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+ self.restart_schedule.set_run_start_time()
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class BoB:
+ """Boss of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, nocache=False, verbose=False, setuid=None,
+ username=None, cmdctl_port=None, brittle=False):
+ """
+ Initialize the Boss of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then the boss reports
+ what it is doing.
+
+ Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specify on which port it
+ should listen.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.cfg_start_auth = True
+ self.cfg_start_resolver = False
+ self.cfg_start_dhcp6 = False
+ self.cfg_start_dhcp4 = False
+ self.started_auth_family = False
+ self.started_resolver_family = False
+ self.curproc = None
+ self.dead_processes = {}
+ self.msgq_socket_file = msgq_socket_file
+ self.nocache = nocache
+ self.processes = {}
+ self.expected_shutdowns = {}
+ self.runnable = False
+ self.uid = setuid
+ self.username = username
+ self.verbose = verbose
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.cmdctl_port = cmdctl_port
+ self.brittle = brittle
+ self.sockcreator = None
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ # Now we declare few functions used only internally here. Besides the
+ # benefit of not polluting the name space, they are closures, so we
+ # don't need to pass some variables
+ def start_stop(name, started, start, stop):
+ if not'start_' + name in new_config:
+ return
+ if new_config['start_' + name]:
+ if not started:
+ if self.uid is not None:
+ logger.info(BIND10_START_AS_NON_ROOT, name)
+ start()
+ else:
+ stop()
+ # These four functions are passed to start_stop (smells like functional
+ # programming little bit)
+ def resolver_on():
+ self.start_resolver(self.c_channel_env)
+ self.started_resolver_family = True
+ def resolver_off():
+ self.stop_resolver()
+ self.started_resolver_family = False
+ def auth_on():
+ self.start_auth(self.c_channel_env)
+ self.start_xfrout(self.c_channel_env)
+ self.start_xfrin(self.c_channel_env)
+ self.start_zonemgr(self.c_channel_env)
+ self.started_auth_family = True
+ def auth_off():
+ self.stop_zonemgr()
+ self.stop_xfrin()
+ self.stop_xfrout()
+ self.stop_auth()
+ self.started_auth_family = False
+
+ # The real code of the config handler function follows here
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ start_stop('resolver', self.started_resolver_family, resolver_on,
+ resolver_off)
+ start_stop('auth', self.started_auth_family, auth_on, auth_off)
+
+ answer = isc.config.ccsession.create_answer(0)
+ return answer
+
+ def get_processes(self):
+ pids = list(self.processes.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.processes[pid].name])
+ return process_list
+
+ def _get_stats_data(self):
+ return { "owner": "Boss",
+ "data": { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+ }
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
+ elif command == "sendstats":
+ # send statistics data to the stats daemon immediately
+ stats_data = self._get_stats_data()
+ valid = self.ccs.get_module_spec().validate_statistics(
+ True, stats_data["data"])
+ if valid:
+ cmd = isc.config.ccsession.create_command('set', stats_data)
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes an orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ else:
+ logger.fatal(BIND10_INVALID_STATISTICS_DATA);
+ answer = isc.config.ccsession.create_answer(
+ 1, "specified statistics data is invalid")
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def start_creator(self):
+ self.curproc = 'b10-sockcreator'
+ self.sockcreator = isc.bind10.sockcreator.Creator("@@LIBEXECDIR@@:" +
+ os.environ['PATH'])
+
+ def stop_creator(self, kill=False):
+ if self.sockcreator is None:
+ return
+ if kill:
+ self.sockcreator.kill()
+ else:
+ self.sockcreator.terminate()
+ self.sockcreator = None
+
+ def kill_started_processes(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+
+ self.stop_creator(True)
+
+ for pid in self.processes:
+ logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
+ self.processes[pid].process.kill()
+ self.processes = {}
+
+ def read_bind10_config(self):
+ """
+ Reads the parameters associated with the BoB module itself.
+
+ At present these are the components to start although arguably this
+ information should be in the configuration for the appropriate
+ module itself. (However, this would cause difficulty in the case of
+ xfrin/xfrout and zone manager as we don't need to start those if we
+ are not running the authoritative server.)
+ """
+ logger.info(BIND10_READING_BOSS_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.cfg_start_auth = config_data.get("start_auth")
+ self.cfg_start_resolver = config_data.get("start_resolver")
+
+ logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
+ logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
+
+ def log_starting(self, process, port = None, address = None):
+ """
+ A convenience function to output a "Starting xxx" message if the
+ logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
+ Putting this into a separate method ensures
+ that the output form is consistent across all processes.
+
+ The process name (passed as the first argument) is put into
+ self.curproc, and is used to indicate which process failed to
+ start if there is an error (and is used in the "Started" message
+ on success). The optional port and address information are
+ appended to the message (if present).
+ """
+ self.curproc = process
+ if port is None and address is None:
+ logger.info(BIND10_STARTING_PROCESS, self.curproc)
+ elif address is None:
+ logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+ port)
+ else:
+ logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+ self.curproc, address, port)
+
+ def log_started(self, pid = None):
+ """
+ A convenience function to output a 'Started xxxx (PID yyyy)'
+ message. As with starting_message(), this ensures a consistent
+ format.
+ """
+ if pid is None:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+ else:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
+ def start_msgq(self, c_channel_env):
+ """
+ Start the message queue and connect to the command channel.
+ """
+ self.log_starting("b10-msgq")
+ c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
+ True, not self.verbose, uid=self.uid,
+ username=self.username)
+ c_channel.spawn()
+ self.processes[c_channel.pid] = c_channel
+ self.log_started(c_channel.pid)
+
+ # Now connect to the c-channel
+ cc_connect_start = time.time()
+ while self.cc_session is None:
+ # if we have been trying for "a while" give up
+ if (time.time() - cc_connect_start) > 5:
+ raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
+
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ except isc.cc.session.SessionError:
+ time.sleep(0.1)
+
+ def start_cfgmgr(self, c_channel_env):
+ """
+ Starts the configuration manager process
+ """
+ self.log_starting("b10-cfgmgr")
+ args = ["b10-cfgmgr"]
+ if self.data_path is not None:
+ args.append("--data-path=" + self.data_path)
+ if self.config_filename is not None:
+ args.append("--config-filename=" + self.config_filename)
+ bind_cfgd = ProcessInfo("b10-cfgmgr", args,
+ c_channel_env, uid=self.uid,
+ username=self.username)
+ bind_cfgd.spawn()
+ self.processes[bind_cfgd.pid] = bind_cfgd
+ self.log_started(bind_cfgd.pid)
+
+ # sleep until b10-cfgmgr is fully up and running, this is a good place
+ # to have a (short) timeout on synchronized groupsend/receive
+ # TODO: replace the sleep by a listen for ConfigManager started
+ # message
+ time.sleep(1)
+
+ def start_ccsession(self, c_channel_env):
+ """
+ Start the CC Session
+
+ The argument c_channel_env is unused but is supplied to keep the
+ argument list the same for all start_xxx methods.
+ """
+ self.log_starting("ccsession")
+ self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.ccs.start()
+ self.log_started()
+
+ # A couple of utility methods for starting processes...
+
+ def start_process(self, name, args, c_channel_env, port=None, address=None):
+ """
+ Given a set of command arguments, start the process and output
+ appropriate log messages. If the start is successful, the process
+ is added to the list of started processes.
+
+ The port and address arguments are for log messages only.
+ """
+ self.log_starting(name, port, address)
+ newproc = ProcessInfo(name, args, c_channel_env)
+ newproc.spawn()
+ self.processes[newproc.pid] = newproc
+ self.log_started(newproc.pid)
+
+ def start_simple(self, name, c_channel_env, port=None, address=None):
+ """
+ Most of the BIND-10 processes are started with the command:
+
+ <process-name> [-v]
+
+ ... where -v is appended if verbose is enabled. This method
+ generates the arguments from the name and starts the process.
+
+ The port and address arguments are for log messages only.
+ """
+ # Set up the command arguments.
+ args = [name]
+ if self.verbose:
+ args += ['-v']
+
+ # ... and start the process
+ self.start_process(name, args, c_channel_env, port, address)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
+ def start_auth(self, c_channel_env):
+ """
+ Start the Authoritative server
+ """
+ authargs = ['b10-auth']
+ if self.nocache:
+ authargs += ['-n']
+ if self.uid:
+ authargs += ['-u', str(self.uid)]
+ if self.verbose:
+ authargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-auth", authargs, c_channel_env)
+
+ def start_resolver(self, c_channel_env):
+ """
+ Start the Resolver. At present, all these arguments and switches
+ are pure speculation. As with the auth daemon, they should be
+ read from the configuration database.
+ """
+ self.curproc = "b10-resolver"
+ # XXX: this must be read from the configuration manager in the future
+ resargs = ['b10-resolver']
+ if self.uid:
+ resargs += ['-u', str(self.uid)]
+ if self.verbose:
+ resargs += ['-v']
+
+ # ... and start
+ self.start_process("b10-resolver", resargs, c_channel_env)
+
+ def start_xfrout(self, c_channel_env):
+ self.start_simple("b10-xfrout", c_channel_env)
+
+ def start_xfrin(self, c_channel_env):
+ self.start_simple("b10-xfrin", c_channel_env)
+
+ def start_zonemgr(self, c_channel_env):
+ self.start_simple("b10-zonemgr", c_channel_env)
+
+ def start_stats(self, c_channel_env):
+ self.start_simple("b10-stats", c_channel_env)
+
+ def start_stats_httpd(self, c_channel_env):
+ self.start_simple("b10-stats-httpd", c_channel_env)
+
+ def start_dhcp6(self, c_channel_env):
+ self.start_simple("b10-dhcp6", c_channel_env)
+
+ def start_cmdctl(self, c_channel_env):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
+
+ def start_all_processes(self):
+ """
+ Starts up all the processes. Any exception generated during the
+ starting of the processes is handled by the caller.
+ """
+ # The socket creator first, as it is the only thing that needs root
+ self.start_creator()
+ # TODO: Once everything uses the socket creator, we can drop root
+ # privileges right now
+
+ c_channel_env = self.c_channel_env
+ self.start_msgq(c_channel_env)
+ self.start_cfgmgr(c_channel_env)
+ self.start_ccsession(c_channel_env)
+
+ # Extract the parameters associated with Bob. This can only be
+ # done after the CC Session is started.
+ self.read_bind10_config()
+
+ # Continue starting the processes. The authoritative server (if
+ # selected):
+ if self.cfg_start_auth:
+ self.start_auth(c_channel_env)
+
+ # ... and resolver (if selected):
+ if self.cfg_start_resolver:
+ self.start_resolver(c_channel_env)
+ self.started_resolver_family = True
+
+ # Everything after the main components can run as non-root.
+ # TODO: this is only temporary - once the privileged socket creator is
+ # fully working, nothing else will run as root.
+ if self.uid is not None:
+ posix.setuid(self.uid)
+
+ # xfrin/xfrout and the zone manager are only meaningful if the
+ # authoritative server has been started.
+ if self.cfg_start_auth:
+ self.start_xfrout(c_channel_env)
+ self.start_xfrin(c_channel_env)
+ self.start_zonemgr(c_channel_env)
+ self.started_auth_family = True
+
+ # ... and finally start the remaining processes
+ self.start_stats(c_channel_env)
+ self.start_stats_httpd(c_channel_env)
+ self.start_cmdctl(c_channel_env)
+
+ if self.cfg_start_dhcp6:
+ self.start_dhcp6(c_channel_env)
+
+ def startup(self):
+ """
+ Start the BoB instance.
+
+ Returns None if successful, otherwise a string describing the
+ problem.
+ """
+ # Try to connect to the c-channel daemon, to see if it is already
+ # running
+ c_channel_env = {}
+ if self.msgq_socket_file is not None:
+ c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
+ logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
+ return "b10-msgq already running, or socket file not cleaned , cannot start"
+ except isc.cc.session.SessionError:
+ # this is the case we want, where the msgq is not running
+ pass
+
+ # Start all processes. If any one fails to start, kill all started
+ # processes and exit with an error indication.
+ try:
+ self.c_channel_env = c_channel_env
+ self.start_all_processes()
+ except Exception as e:
+ self.kill_started_processes()
+ return "Unable to start " + self.curproc + ": " + str(e)
+
+ # Started successfully
+ self.runnable = True
+ return None
+
+ def stop_all_processes(self):
+ """Stop all processes."""
+ cmd = { "command": ['shutdown']}
+
+ self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
+ self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
+ self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
+ self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
+ self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
+ self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
+ self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
+ self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
+ self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
+ # Terminate the creator last
+ self.stop_creator()
+
+ def stop_process(self, process, recipient):
+ """
+ Stop the given process, friendly-like. The process is the name it has
+ (in logs, etc), the recipient is the address on msgq.
+ """
+ logger.info(BIND10_STOP_PROCESS, process)
+ # TODO: Some timeout to solve processes that don't want to die would
+ # help. We can even store it in the dict, it is used only as a set
+ self.expected_shutdowns[process] = 1
+ # Ask the process to die willingly
+ self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
+ recipient)
+
+ # Series of stop_process wrappers
+ def stop_resolver(self):
+ self.stop_process('b10-resolver', 'Resolver')
+
+ def stop_auth(self):
+ self.stop_process('b10-auth', 'Auth')
+
+ def stop_xfrout(self):
+ self.stop_process('b10-xfrout', 'Xfrout')
+
+ def stop_xfrin(self):
+ self.stop_process('b10-xfrin', 'Xfrin')
+
+ def stop_zonemgr(self):
+ self.stop_process('b10-zonemgr', 'Zonemgr')
+
+ def shutdown(self):
+ """Stop the BoB instance."""
+ logger.info(BIND10_SHUTDOWN)
+ # first try using the BIND 10 request to stop
+ try:
+ self.stop_all_processes()
+ except:
+ pass
+ # XXX: some delay probably useful... how much is uncertain
+ # I have changed the delay from 0.5 to 1, but sometimes it's
+ # still not enough.
+ time.sleep(1)
+ self.reap_children()
+ # next try sending a SIGTERM
+ processes_to_stop = list(self.processes.values())
+ for proc_info in processes_to_stop:
+ logger.info(BIND10_SEND_SIGTERM, proc_info.name,
+ proc_info.pid)
+ try:
+ proc_info.process.terminate()
+ except OSError:
+ # ignore these (usually ESRCH because the child
+ # finally exited)
+ pass
+ # finally, send SIGKILL (unmaskable termination) until everybody dies
+ while self.processes:
+ # XXX: some delay probably useful... how much is uncertain
+ time.sleep(0.1)
+ self.reap_children()
+ processes_to_stop = list(self.processes.values())
+ for proc_info in processes_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, proc_info.name,
+ proc_info.pid)
+ try:
+ proc_info.process.kill()
+ except OSError:
+ # ignore these (usually ESRCH because the child
+ # finally exited)
+ pass
+ logger.info(BIND10_SHUTDOWN_COMPLETE)
+
+ def _get_process_exit_status(self):
+ return os.waitpid(-1, os.WNOHANG)
+
+ def reap_children(self):
+ """Check to see if any of our child processes have exited,
+ and note this for later handling.
+ """
+ while True:
+ try:
+ (pid, exit_status) = self._get_process_exit_status()
+ except OSError as o:
+ if o.errno == errno.ECHILD: break
+ # XXX: should be impossible to get any other error here
+ raise
+ if pid == 0: break
+ if self.sockcreator is not None and self.sockcreator.pid() == pid:
+ # This is the socket creator, started and terminated
+ # differently. This can't be restarted.
+ if self.runnable:
+ logger.fatal(BIND10_SOCKCREATOR_CRASHED)
+ self.sockcreator = None
+ self.runnable = False
+ elif pid in self.processes:
+ # One of the processes we know about. Get information on it.
+ proc_info = self.processes.pop(pid)
+ proc_info.restart_schedule.set_run_stop_time()
+ self.dead_processes[proc_info.pid] = proc_info
+
+ # Write out message, but only if in the running state:
+ # During startup and shutdown, these messages are handled
+ # elsewhere.
+ if self.runnable:
+ if exit_status is None:
+ logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
+ proc_info.name, proc_info.pid)
+ else:
+ logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
+ proc_info.name, proc_info.pid,
+ exit_status)
+
+ # Was it a special process?
+ if proc_info.name == "b10-msgq":
+ logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
+ self.runnable = False
+
+ # If we're in 'brittle' mode, we want to shutdown after
+ # any process dies.
+ if self.brittle:
+ self.runnable = False
+ else:
+ logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+ def restart_processes(self):
+ """
+ Restart any dead processes:
+
+ * Returns the time when the next process is ready to be restarted.
+ * If the server is shutting down, returns 0.
+ * If there are no processes, returns None.
+
+ The values returned can be safely passed into select() as the
+ timeout value.
+ """
+ next_restart = None
+ # if we're shutting down, then don't restart
+ if not self.runnable:
+ return 0
+ # otherwise look through each dead process and try to restart
+ still_dead = {}
+ now = time.time()
+ for proc_info in self.dead_processes.values():
+ if proc_info.name in self.expected_shutdowns:
+ # We don't restart, we wanted it to die
+ del self.expected_shutdowns[proc_info.name]
+ continue
+ restart_time = proc_info.restart_schedule.get_restart_time(now)
+ if restart_time > now:
+ if (next_restart is None) or (next_restart > restart_time):
+ next_restart = restart_time
+ still_dead[proc_info.pid] = proc_info
+ else:
+ logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
+ try:
+ proc_info.respawn()
+ self.processes[proc_info.pid] = proc_info
+ logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
+ except:
+ still_dead[proc_info.pid] = proc_info
+ # remember any processes that refuse to be resurrected
+ self.dead_processes = still_dead
+ # return the time when the next process is ready to be restarted
+ return next_restart
+
+# global variables, needed for signal handlers
+options = None
+boss_of_bind = None
+
+def reaper(signal_number, stack_frame):
+ """A child process has died (SIGCHLD received)."""
+ # don't do anything...
+ # the Python signal handler has been set up to write
+ # down a pipe, waking up our select() bit
+ pass
+
+def get_signame(signal_number):
+ """Return the symbolic name for a signal."""
+ for sig in dir(signal):
+ if sig.startswith("SIG") and sig[3].isalnum():
+ if getattr(signal, sig) == signal_number:
+ return sig
+ return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
+def fatal_signal(signal_number, stack_frame):
+ """We need to exit (SIGINT or SIGTERM received)."""
+ global options
+ global boss_of_bind
+ logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.runnable = False
+
+def process_rename(option, opt_str, value, parser):
+ """Function that renames the process if it is requested by a option."""
+ isc.util.process.rename(value)
+
+def parse_args(args=sys.argv[1:], Parser=OptionParser):
+ """
+ Function for parsing command line arguments. Returns the
+ options object from OptionParser.
+ """
+ parser = Parser(version=VERSION)
+ parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
+ type="string", default=None,
+ help="UNIX domain socket file the b10-msgq daemon will use")
+ parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
+ default=False, help="disable hot-spot cache in authoritative DNS server")
+ parser.add_option("-u", "--user", dest="user", type="string", default=None,
+ help="Change user after startup (must run as root)")
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
+ parser.add_option("--pretty-name", type="string", action="callback",
+ callback=process_rename,
+ help="Set the process name (displayed in ps, top, ...)")
+ parser.add_option("-c", "--config-file", action="store",
+ dest="config_file", default=None,
+ help="Configuration database filename")
+ parser.add_option("-p", "--data-path", dest="data_path",
+ help="Directory to search for configuration files",
+ default=None)
+ parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
+ default=None, help="Port of command control")
+ parser.add_option("--pid-file", dest="pid_file", type="string",
+ default=None,
+ help="file to dump the PID of the BIND 10 process")
+ parser.add_option("--brittle", dest="brittle", action="store_true",
+ help="debugging flag: exit if any component dies")
+
+ (options, args) = parser.parse_args(args)
+
+ if options.cmdctl_port is not None:
+ try:
+ isc.net.parse.port_parse(options.cmdctl_port)
+ except ValueError as e:
+ parser.error(e)
+
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ return options
+
+def dump_pid(pid_file):
+ """
+ Dump the PID of the current process to the specified file. If the given
+ file is None this function does nothing. If the file already exists,
+ the existing content will be removed. If a system error happens in
+ creating or writing to the file, the corresponding exception will be
+ propagated to the caller.
+ """
+ if pid_file is None:
+ return
+ f = open(pid_file, "w")
+ f.write('%d\n' % os.getpid())
+ f.close()
+
+def unlink_pid_file(pid_file):
+ """
+ Remove the given file, which is basically expected to be the PID file
+ created by dump_pid(). The specified file may or may not exist; if it
+ doesn't this function does nothing. Other system level errors in removing
+ the file will be propagated as the corresponding exception.
+ """
+ if pid_file is None:
+ return
+ try:
+ os.unlink(pid_file)
+ except OSError as error:
+ if error.errno is not errno.ENOENT:
+ raise
+
+
+def main():
+ global options
+ global boss_of_bind
+ # Enforce line buffering on stdout, even when not a TTY
+ sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
+
+ options = parse_args()
+
+ # Check user ID.
+ setuid = None
+ username = None
+ if options.user:
+ # Try getting information about the user, assuming UID passed.
+ try:
+ pw_ent = pwd.getpwuid(int(options.user))
+ setuid = pw_ent.pw_uid
+ username = pw_ent.pw_name
+ except ValueError:
+ pass
+ except KeyError:
+ pass
+
+ # Next try getting information about the user, assuming user name
+ # passed.
+ # If the information is both a valid user name and user number, we
+ # prefer the name because we try it second. A minor point, hopefully.
+ try:
+ pw_ent = pwd.getpwnam(options.user)
+ setuid = pw_ent.pw_uid
+ username = pw_ent.pw_name
+ except KeyError:
+ pass
+
+ if setuid is None:
+ logger.fatal(BIND10_INVALID_USER, options.user)
+ sys.exit(1)
+
+ # Announce startup.
+ logger.info(BIND10_STARTING, VERSION)
+
+ # Create wakeup pipe for signal handlers
+ wakeup_pipe = os.pipe()
+ signal.set_wakeup_fd(wakeup_pipe[1])
+
+ # Set signal handlers for catching child termination, as well
+ # as our own demise.
+ signal.signal(signal.SIGCHLD, reaper)
+ signal.siginterrupt(signal.SIGCHLD, False)
+ signal.signal(signal.SIGINT, fatal_signal)
+ signal.signal(signal.SIGTERM, fatal_signal)
+
+ # Block SIGPIPE, as we don't want it to end this process
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+
+ # Go bob!
+ boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+ options.config_file, options.nocache, options.verbose,
+ setuid, username, options.cmdctl_port, options.brittle)
+ startup_result = boss_of_bind.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # In our main loop, we check for dead processes or messages
+ # on the c-channel.
+ wakeup_fd = wakeup_pipe[0]
+ ccs_fd = boss_of_bind.ccs.get_socket().fileno()
+ while boss_of_bind.runnable:
+ # clean up any processes that exited
+ boss_of_bind.reap_children()
+ next_restart = boss_of_bind.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ boss_of_bind.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ self.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ boss_of_bind.shutdown()
+ unlink_pid_file(options.pid_file)
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..b4cfac6 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -37,6 +37,17 @@
"command_description": "List the running BIND 10 processes",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API is between Boss and other modules to allow them requesting of sockets.
+For simplicity, we will use the socket creator for all (even non-privileged)
+ports for now, but we should have some function where we can abstract it later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP, bound to a given
+ port and address (sockets that are not bound to anything can be created
+ without privileges, therefore are not requested from the socket creator).
+* Allow to provide the same socket to multiple modules (eg. multiple running
+ auth servers).
+* Allow releasing the sockets (in case all modules using it give it up,
+ terminate or crash).
+* Allow restricting of the sharing (don't allow shared socket between auth
+ and recursive, as the packets would often get to the wrong application,
+ show error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+ information (like the parameters to be used), we would have to do it
+ manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+ permissions are not enough, for example if they are not honored on
+ socket files, as indicated in the first paragraph of:
+ http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+ The socket would have to be secured separately. With the tokens,
+ there's some level of security already - someone not having the
+ token can't request a privileged socket.
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process will
+hold a cache. Each socket that is created and sent is kept open in Boss and
+preserved there as well. A reference count is kept with each of them.
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of creating it again by the creator.
+
+When an application gives up the socket willingly (by sending a message over the
+command channel), the reference count can be decreased without problems. But
+when the application terminates or crashes, we need to decrease it as well.
+There's a problem, since we don't know which command channel connection (eg.
+lname) belongs to which PID. Furthermore, the applications don't need to be
+started by boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. group message to
+ some name). This one is better if we want to migrate to dbus, since dbus
+ already has this capability as well as sending the sockets inbound (at least it
+ seems so on unix) and we could get rid of the unix-domain socket completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+ was sent to which connection and when the connection closes (because the
+ application crashed), it can drop all the references on the sockets. This
+ seems easier to implement.
+
+The commands
+------------
+* Command to release a socket. This one would have single parameter, the token
+ used to get the socket. After this, boss would decrease its reference count
+ and if it drops to zero, close its own copy of the socket. This should be used
+ when the module stops using the socket (and after closes it). The
+ library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple
+ times in parallel).
+* Command to request a socket. It would have parameters to specify which socket
+ (IP address, address family, port) and how to allow sharing. Sharing would be
+ one of:
+ - None
+ - Same kind of application (however, it is not entirely clear what
+ this means, in case it won't work out intuitively, we'll need to
+ define it somehow)
+ - Any kind of application
+ And a kind of application would be provided, to decide if the sharing is
+ possible (eg. if auth allows sharing with the same kind and something else
+ allows sharing with anything, the sharing is not possible, two auths can).
+
+ It would return either error (the socket can't be created or sharing is not
+ possible) or the token. Then there would be some time for the application to
+ pick up the requested socket.
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so a code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
+
+The current socket creator doesn't know raw sockets, but if they are
+needed, it should be easy to add.
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 4020593..50e6e29 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -20,17 +20,17 @@ export PYTHON_EXEC
BIND10_PATH=@abs_top_builddir@/src/bin/bind10
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 3d8d57a..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -2,13 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
PYTESTS = bind10_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,8 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 91d326c..2efd940 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
# XXX: environment tests are currently disabled, due to the preprocessor
# setup that we have now complicating the environment
@@ -26,6 +26,7 @@ import socket
from isc.net.addr import IPAddr
import time
import isc
+import isc.log
from isc.testutils.parse_args import TestOptParser, OptsError
@@ -136,9 +137,27 @@ class TestBoB(unittest.TestCase):
def group_sendmsg(self, msg, group):
(self.msg, self.group) = (msg, group)
def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Boss",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
bob = BoB()
bob.verbose = True
bob.cc_session = DummySession()
+ bob.ccs = DummyModuleCCSession()
# a bad command
self.assertEqual(bob.command_handler(-1, None),
isc.config.ccsession.create_answer(1, "bad command"))
@@ -146,14 +165,22 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.command_handler("shutdown", None),
isc.config.ccsession.create_answer(0))
self.assertFalse(bob.runnable)
+ # "getstats" command
+ self.assertEqual(bob.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
isc.config.ccsession.create_answer(0))
self.assertEqual(bob.cc_session.group, "Stats")
self.assertEqual(bob.cc_session.msg,
isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ "set", { "owner": "Boss",
+ "data": {
+ "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
}}))
# "ping" command
self.assertEqual(bob.command_handler("ping", None),
@@ -192,6 +219,13 @@ class MockBob(BoB):
self.cmdctl = False
self.c_channel_env = {}
self.processes = { }
+ self.creator = False
+
+ def start_creator(self):
+ self.creator = True
+
+ def stop_creator(self, kill=False):
+ self.creator = False
def read_bind10_config(self):
# Configuration options are set directly
@@ -336,6 +370,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
self.assertEqual(bob.msgq, core)
self.assertEqual(bob.cfgmgr, core)
self.assertEqual(bob.ccsession, core)
+ self.assertEqual(bob.creator, core)
self.assertEqual(bob.auth, auth)
self.assertEqual(bob.resolver, resolver)
self.assertEqual(bob.xfrout, auth)
@@ -764,4 +799,5 @@ class TestBrittle(unittest.TestCase):
self.assertFalse(bob.runnable)
if __name__ == '__main__':
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index cd8bcb3..700f26e 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,6 +5,8 @@ man_MANS = bindctl.1
EXTRA_DIST = $(man_MANS) bindctl.xml
+noinst_SCRIPTS = run_bindctl.sh
+
python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
mycollections.py
pythondir = $(pyexecdir)/bindctl
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 8973aa5..8c2b674 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -398,6 +398,8 @@ class BindCmdInterpreter(Cmd):
print("Error: " + str(dte))
except isc.cc.data.DataNotFoundError as dnfe:
print("Error: " + str(dnfe))
+ except isc.cc.data.DataAlreadyPresentError as dape:
+ print("Error: " + str(dape))
except KeyError as ke:
print("Error: missing " + str(ke))
else:
@@ -634,7 +636,15 @@ class BindCmdInterpreter(Cmd):
# we have more data to show
line += "/"
else:
- line += "\t" + json.dumps(value_map['value'])
+ # if type is named_set, don't print value if None
+ # (it is either {} meaning empty, or None, meaning
+ # there actually is data, but not to be shown with
+ # the current command
+ if value_map['type'] == 'named_set' and\
+ value_map['value'] is None:
+ line += "/\t"
+ else:
+ line += "\t" + json.dumps(value_map['value'])
line += "\t" + value_map['type']
line += "\t"
if value_map['default']:
@@ -649,10 +659,9 @@ class BindCmdInterpreter(Cmd):
data, default = self.config_data.get_value(identifier)
print(json.dumps(data))
elif cmd.command == "add":
- if 'value' in cmd.params:
- self.config_data.add_value(identifier, cmd.params['value'])
- else:
- self.config_data.add_value(identifier)
+ self.config_data.add_value(identifier,
+ cmd.params.get('value_or_name'),
+ cmd.params.get('value_for_set'))
elif cmd.command == "remove":
if 'value' in cmd.params:
self.config_data.remove_value(identifier, cmd.params['value'])
@@ -674,9 +683,12 @@ class BindCmdInterpreter(Cmd):
elif cmd.command == "revert":
self.config_data.clear_local_changes()
elif cmd.command == "commit":
- self.config_data.commit()
+ try:
+ self.config_data.commit()
+ except isc.config.ModuleCCSessionError as mcse:
+ print(str(mcse))
elif cmd.command == "diff":
- print(self.config_data.get_local_changes());
+ print(self.config_data.get_local_changes())
elif cmd.command == "go":
self.go(identifier)
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 01307e9..ee4191d 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -50,17 +50,28 @@ def prepare_config_commands(tool):
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+ cmd = CommandInfo(name = "add", desc =
+ "Add an entry to configuration list or a named set. "
+ "When adding to a list, the command has one optional argument, "
+ "a value to add to the list. The value must be in correct JSON "
+ "and complete. When adding to a named set, it has one "
+ "mandatory parameter (the name to add), and an optional "
+ "parameter value, similar to when adding to a list. "
+ "In either case, when no value is given, an entry will be "
+ "constructed with default values.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value_or_name", type = "string", optional=True, desc = "Specifies a value to add to the list, or the name when adding to a named set. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+ param = ParamInfo(name = "value_for_set", type = "string", optional=True, desc = "Specifies an optional value to add to the named map. It must be in correct JSON format and complete.")
cmd.add_param(param)
module.add_command(cmd)
- cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+ cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list or named set.")
param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "When identifier is a list, specifies a value to remove from the list. It must be in correct JSON format and complete. When it is a named set, specifies the name to remove.")
cmd.add_param(param)
module.add_command(cmd)
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index 8f6ba59..f4cc40c 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -20,14 +20,14 @@ export PYTHON_EXEC
BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index 891d413..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 8befbdf..2ccc430 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -28,7 +28,7 @@ import os.path
import isc.log
isc.log.init("b10-cfgmgr")
from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError, logger
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
isc.util.process.rename()
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 07b7a85..ffea2d7 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,8 +19,8 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index bd67241..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -1,13 +1,14 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-cfgmgr_test.py
-EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
+noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/plugins/testplugin.py
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,9 +20,10 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env TESTDATA_PATH=$(abs_srcdir)/testdata \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ TESTDATA_PATH=$(abs_srcdir)/testdata \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index 97a64ff..e302fa6 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -4,6 +4,9 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-cmdctl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
b10_cmdctldir = $(pkgdatadir)
# NOTE: this will overwrite on install
@@ -18,10 +21,12 @@ b10_cmdctl_DATA += cmdctl.spec
EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
-CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
man_MANS = b10-cmdctl.8
-EXTRA_DIST += $(man_MANS) b10-cmdctl.xml
+EXTRA_DIST += $(man_MANS) b10-cmdctl.xml cmdctl_messages.mes
if ENABLE_MAN
@@ -33,8 +38,12 @@ endif
cmdctl.spec: cmdctl.spec.pre
$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
+$(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index f1c1021..fcd69b8 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -47,6 +47,18 @@ import isc.net.parse
from optparse import OptionParser, OptionValueError
from hashlib import sha1
from isc.util import socketserver_mixin
+from isc.log_messages.cmdctl_messages import *
+
+# TODO: these debug-levels are hard-coded here; we are planning on
+# creating a general set of debug levels, see ticket #1074. When done,
+# we should remove these values and use the general ones in the
+# logger.debug calls
+
+# Debug level for communication with BIND10
+DBG_CMDCTL_MESSAGING = 30
+
+isc.log.init("b10-cmdctl")
+logger = isc.log.Logger("cmdctl")
try:
import threading
@@ -173,7 +185,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
if not user_name:
return False, ["need user name"]
if not self.server.get_user_info(user_name):
- return False, ["user doesn't exist"]
+ logger.info(CMDCTL_NO_SUCH_USER, user_name)
+ return False, ["username or password error"]
user_pwd = user_info.get('password')
if not user_pwd:
@@ -181,7 +194,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
local_info = self.server.get_user_info(user_name)
pwd_hashval = sha1((user_pwd + local_info[1]).encode())
if pwd_hashval.hexdigest() != local_info[0]:
- return False, ["password doesn't match"]
+ logger.info(CMDCTL_BAD_PASSWORD, user_name)
+ return False, ["username or password error"]
return True, None
@@ -281,7 +295,7 @@ class CommandControl():
errstr = 'unknown config item: ' + key
if errstr != None:
- self.log_info('Fail to apply config data, ' + errstr)
+ logger.error(CMDCTL_BAD_CONFIG_DATA, errstr);
return ccsession.create_answer(1, errstr)
return ccsession.create_answer(0)
@@ -387,8 +401,8 @@ class CommandControl():
'''Send the command from bindctl to proper module. '''
errstr = 'unknown error'
answer = None
- if self._verbose:
- self.log_info("Begin send command '%s' to module '%s'" %(command_name, module_name))
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_SEND_COMMAND,
+ command_name, module_name)
if module_name == self._module_name:
# Process the command sent to cmdctl directly.
@@ -396,15 +410,14 @@ class CommandControl():
else:
msg = ccsession.create_command(command_name, params)
seq = self._cc.group_sendmsg(msg, module_name)
+ logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_COMMAND_SENT,
+ command_name, module_name)
#TODO, it may be blocked, msqg need to add a new interface waiting in timeout.
try:
answer, env = self._cc.group_recvmsg(False, seq)
except isc.cc.session.SessionTimeout:
errstr = "Module '%s' not responding" % module_name
- if self._verbose:
- self.log_info("Finish send command '%s' to module '%s'" % (command_name, module_name))
-
if answer:
try:
rcode, arg = ccsession.parse_answer(answer)
@@ -415,16 +428,13 @@ class CommandControl():
else:
return rcode, {}
else:
- # TODO: exception
errstr = str(answer['result'][1])
except ccsession.ModuleCCSessionError as mcse:
errstr = str("Error in ccsession answer:") + str(mcse)
- self.log_info(errstr)
+
+ logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
return 1, {'error': errstr}
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
-
def get_cmdctl_config_data(self):
''' If running in source code tree, use keyfile, certificate
and user accounts file in source code. '''
@@ -481,14 +491,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
for row in reader:
self._user_infos[row[0]] = [row[1], row[2]]
except (IOError, IndexError) as e:
- self.log_info("Fail to read user database, %s" % e)
+ logger.error(CMDCTL_USER_DATABASE_READ_ERROR,
+ accounts_file, e)
finally:
if csvfile:
csvfile.close()
self._accounts_file = accounts_file
if len(self._user_infos) == 0:
- self.log_info("Fail to get user information, will deny any user")
+ logger.error(CMDCTL_NO_USER_ENTRIES_READ)
def get_user_info(self, username):
'''Get user's salt and hashed string. If the user
@@ -520,7 +531,7 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
ssl_version = ssl.PROTOCOL_SSLv23)
return ssl_sock
except (ssl.SSLError, CmdctlException) as err :
- self.log_info("Deny client's connection because %s" % str(err))
+ logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
self.close_request(sock)
# raise socket error to finish the request
raise socket.error
@@ -547,9 +558,6 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
def send_command_to_module(self, module_name, command_name, params):
return self.cmdctl.send_command_with_check(module_name, command_name, params)
- def log_info(self, msg):
- sys.stdout.write("[b10-cmdctl] %s\n" % str(msg))
-
httpd = None
def signal_handler(signal, frame):
@@ -607,15 +615,13 @@ if __name__ == '__main__':
run(options.addr, options.port, options.idle_timeout, options.verbose)
result = 0
except isc.cc.SessionError as err:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the command channel daemon running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_ERROR, err)
except isc.cc.SessionTimeout:
- sys.stderr.write("[b10-cmdctl] Error creating b10-cmdctl, "
- "is the configuration manager running?\n")
+ logger.fatal(CMDCTL_CC_SESSION_TIMEOUT)
except KeyboardInterrupt:
- sys.stderr.write("[b10-cmdctl] exit from Cmdctl\n")
+ logger.info(CMDCTL_STOPPED_BY_KEYBOARD)
except CmdctlException as err:
- sys.stderr.write("[b10-cmdctl] " + str(err) + "\n")
+ logger.fatal(CMDCTL_UNCAUGHT_EXCEPTION, err);
if httpd:
httpd.shutdown()
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
new file mode 100644
index 0000000..e007296
--- /dev/null
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -0,0 +1,81 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the cmdctl_messages python module.
+
+% CMDCTL_BAD_CONFIG_DATA error in config data: %1
+There was an error reading the updated configuration data. The specific
+error is printed.
+
+% CMDCTL_BAD_PASSWORD bad password for user: %1
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+
+% CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+
+% CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+
+% CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent
+This debug message indicates that the given command has been sent to
+the given module.
+
+% CMDCTL_NO_SUCH_USER username not found in user database: %1
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+
+% CMDCTL_SEND_COMMAND sending command %1 to module %2
+This debug message indicates that the given command is being sent to
+the given module.
+
+% CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+
+% CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+
+% CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+
+% CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index e4ec9d4..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/cmdctl/tests/cmdctl_test.py b/src/bin/cmdctl/tests/cmdctl_test.py
index 5463c36..3103f47 100644
--- a/src/bin/cmdctl/tests/cmdctl_test.py
+++ b/src/bin/cmdctl/tests/cmdctl_test.py
@@ -19,6 +19,7 @@ import socket
import tempfile
import sys
from cmdctl import *
+import isc.log
SPEC_FILE_PATH = '..' + os.sep
if 'CMDCTL_SPEC_PATH' in os.environ:
@@ -173,7 +174,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
self.handler.server._user_infos['root'] = ['aa', 'aaa']
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['password doesn\'t match'])
+ self.assertEqual(msg, ['username or password error'])
def test_check_user_name_and_pwd_2(self):
user_info = {'username':'root', 'password':'abc123'}
@@ -214,7 +215,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
ret, msg = self.handler._check_user_name_and_pwd()
self.assertFalse(ret)
- self.assertEqual(msg, ['user doesn\'t exist'])
+ self.assertEqual(msg, ['username or password error'])
def test_do_POST(self):
self.handler.headers = {}
@@ -447,6 +448,7 @@ class TestFuncNotInClass(unittest.TestCase):
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 40b80df..805d6bb 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -35,6 +35,7 @@ b10_dhcp6_SOURCES = main.cc iface_mgr.cc pkt6.cc dhcp6_srv.cc
b10_dhcp6_SOURCES += iface_mgr.h pkt6.h dhcp6_srv.h dhcp6.h
b10_dhcp6_LDADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_dhcp6_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libcc.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 91eabce..873b21f 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,14 +8,14 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 61ec009..5ae1f5e 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10 import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
diff --git a/src/bin/host/Makefile.am b/src/bin/host/Makefile.am
index ec34ce7..a8f96c2 100644
--- a/src/bin/host/Makefile.am
+++ b/src/bin/host/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = *.gcno *.gcda
bin_PROGRAMS = b10-host
b10_host_SOURCES = host.cc
b10_host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+b10_host_LDADD += $(top_builddir)/src/lib/util/libutil.la
b10_host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
man_MANS = b10-host.1
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
index ed0068b..050f6a3 100644
--- a/src/bin/host/b10-host.1
+++ b/src/bin/host/b10-host.1
@@ -103,10 +103,6 @@ It doesn\'t use
at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
.PP
-\fBb10\-host\fR
-does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
-.PP
-
\fB\-p\fR
is not a standard feature\&.
.SH "HISTORY"
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
index 7da07dd..a17ef67 100644
--- a/src/bin/host/b10-host.xml
+++ b/src/bin/host/b10-host.xml
@@ -176,11 +176,6 @@
</para>
<para>
- <command>b10-host</command> does not do reverse lookups by
- default yet (by detecting if name is a IPv4 or IPv6 address).
- </para>
-
- <para>
<option>-p</option> is not a standard feature.
</para>
</refsect1>
diff --git a/src/bin/loadzone/Makefile.am b/src/bin/loadzone/Makefile.am
index 74d4dd4..a235d68 100644
--- a/src/bin/loadzone/Makefile.am
+++ b/src/bin/loadzone/Makefile.am
@@ -1,5 +1,6 @@
SUBDIRS = . tests/correct tests/error
bin_SCRIPTS = b10-loadzone
+noinst_SCRIPTS = run_loadzone.sh
CLEANFILES = b10-loadzone
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
index 95de396..43b7920 100755
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -18,14 +18,14 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index 3507bfa..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,11 +13,13 @@ EXTRA_DIST += ttl2.db
EXTRA_DIST += ttlext.db
EXTRA_DIST += example.db
+noinst_SCRIPTS = correct_test.sh
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: maybe use TESTS?
diff --git a/src/bin/loadzone/tests/correct/correct_test.sh.in b/src/bin/loadzone/tests/correct/correct_test.sh.in
old mode 100644
new mode 100755
index 509d8e5..d944451
--- a/src/bin/loadzone/tests/correct/correct_test.sh.in
+++ b/src/bin/loadzone/tests/correct/correct_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index 87bb1cf..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,11 +12,13 @@ EXTRA_DIST += keyerror3.db
EXTRA_DIST += originerr1.db
EXTRA_DIST += originerr2.db
+noinst_SCRIPTS = error_test.sh
+
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: use TESTS ?
diff --git a/src/bin/loadzone/tests/error/error_test.sh.in b/src/bin/loadzone/tests/error/error_test.sh.in
old mode 100644
new mode 100755
index d1d6bd1..94c5edb
--- a/src/bin/loadzone/tests/error/error_test.sh.in
+++ b/src/bin/loadzone/tests/error/error_test.sh.in
@@ -18,7 +18,7 @@
PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
export PYTHONPATH
LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 50c1e6e..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/msgq \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/resolver/Makefile.am b/src/bin/resolver/Makefile.am
index bce8307..3f5f049 100644
--- a/src/bin/resolver/Makefile.am
+++ b/src/bin/resolver/Makefile.am
@@ -59,6 +59,8 @@ nodist_b10_resolver_SOURCES = resolver_messages.cc resolver_messages.h
b10_resolver_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
b10_resolver_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
b10_resolver_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/util/libutil.la
+b10_resolver_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
b10_resolver_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
b10_resolver_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
.\" Title: b10-resolver
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: February 17, 2011
+.\" Date: August 17, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
.PP
\fB\-v\fR
.RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
.RE
.SH "CONFIGURATION AND COMMANDS"
.PP
@@ -77,6 +77,25 @@ string and
number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
\fIretries\fR
is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
.PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
.PP
\fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>February 17, 2011</date>
+ <date>August 17, 2011</date>
</refentryinfo>
<refmeta>
@@ -99,11 +99,14 @@
</listitem>
</varlistentry>
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overridden by configuration or bindctl? -->
<varlistentry>
<term><option>-v</option></term>
<listitem><para>
- Enabled verbose mode. This enables diagnostic messages to
- STDERR.
+ Enable verbose mode.
+ This sets logging to the maximum debugging level.
</para></listitem>
</varlistentry>
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
</para>
<para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+ <varname>query_acl</varname> is a list of query access control
+ rules. The list items are the <varname>action</varname> string
+ and the <varname>from</varname> or <varname>key</varname> strings.
+ The possible actions are ACCEPT, REJECT and DROP.
+ The <varname>from</varname> is a remote (source) IPv4 or IPv6
+ address or special keyword.
+ The <varname>key</varname> is a TSIG key name.
+ The default configuration accepts queries from 127.0.0.1 and ::1.
+ </para>
+
+ <para>
<varname>retries</varname> is the number of times to retry
(resend query) after a query timeout
(<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
root servers to start resolving.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- If empty, a hardcoded address for F-root (192.5.5.241) is used.
+ By default, a hardcoded address for l.root-servers.net
+ (199.7.83.42 or 2001:500:3::42) is used.
</para>
+<!-- TODO: this is broken, see ticket #1184 -->
<para>
<varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
The <command>b10-resolver</command> daemon was first coded in
September 2010. The initial implementation only provided
forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+ Caching was implemented in February 2011.
+ Access control was introduced in June 2011.
<!-- TODO: document when validation was added -->
</para>
</refsect1>
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index d9c30b9..79146da 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -208,8 +208,7 @@ main(int argc, char* argv[]) {
cc_session = new Session(io_service.get_io_service());
config_session = new ModuleCCSession(specfile, *cc_session,
my_config_handler,
- my_command_handler,
- true, true);
+ my_command_handler);
LOG_DEBUG(resolver_logger, RESOLVER_DBG_INIT, RESOLVER_CONFIG_CHANNEL);
// FIXME: This does not belong here, but inside Boss
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index be254b7..6af383a 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -26,7 +26,7 @@
#include <exceptions/exceptions.h>
-#include <acl/acl.h>
+#include <acl/dns.h>
#include <acl/loader.h>
#include <asiodns/asiodns.h>
@@ -62,6 +62,7 @@ using boost::shared_ptr;
using namespace isc;
using namespace isc::util;
using namespace isc::acl;
+using isc::acl::dns::RequestACL;
using namespace isc::dns;
using namespace isc::data;
using namespace isc::config;
@@ -82,7 +83,9 @@ public:
client_timeout_(4000),
lookup_timeout_(30000),
retries_(3),
- query_acl_(new Resolver::ClientACL(REJECT)),
+ // we apply "reject all" (implicit default of the loader) ACL by
+ // default:
+ query_acl_(acl::dns::getRequestLoader().load(Element::fromJSON("[]"))),
rec_query_(NULL)
{}
@@ -160,11 +163,11 @@ public:
OutputBufferPtr buffer,
DNSServer* server);
- const Resolver::ClientACL& getQueryACL() const {
+ const RequestACL& getQueryACL() const {
return (*query_acl_);
}
- void setQueryACL(shared_ptr<const Resolver::ClientACL> new_acl) {
+ void setQueryACL(shared_ptr<const RequestACL> new_acl) {
query_acl_ = new_acl;
}
@@ -192,7 +195,7 @@ public:
private:
/// ACL on incoming queries
- shared_ptr<const Resolver::ClientACL> query_acl_;
+ shared_ptr<const RequestACL> query_acl_;
/// Object to handle upstream queries
RecursiveQuery* rec_query_;
@@ -514,8 +517,11 @@ ResolverImpl::processNormalQuery(const IOMessage& io_message,
const RRClass qclass = question->getClass();
// Apply query ACL
- Client client(io_message);
- const BasicAction query_action(getQueryACL().execute(client));
+ const Client client(io_message);
+ const BasicAction query_action(
+ getQueryACL().execute(acl::dns::RequestContext(
+ client.getRequestSourceIPAddress(),
+ query_message->getTSIGRecord())));
if (query_action == isc::acl::REJECT) {
LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
.arg(question->getName()).arg(qtype).arg(qclass).arg(client);
@@ -574,32 +580,6 @@ ResolverImpl::processNormalQuery(const IOMessage& io_message,
return (RECURSION);
}
-namespace {
-// This is a simplified ACL parser for the initial implementation with minimal
-// external dependency. For a longer term we'll switch to a more generic
-// loader with allowing more complicated ACL syntax.
-shared_ptr<const Resolver::ClientACL>
-createQueryACL(isc::data::ConstElementPtr acl_config) {
- if (!acl_config) {
- return (shared_ptr<const Resolver::ClientACL>());
- }
-
- shared_ptr<Resolver::ClientACL> new_acl(
- new Resolver::ClientACL(REJECT));
- BOOST_FOREACH(ConstElementPtr rule, acl_config->listValue()) {
- ConstElementPtr action = rule->get("action");
- ConstElementPtr from = rule->get("from");
- if (!action || !from) {
- isc_throw(BadValue, "query ACL misses mandatory parameter");
- }
- new_acl->append(shared_ptr<IPCheck<Client> >(
- new IPCheck<Client>(from->stringValue())),
- defaultActionLoader(action));
- }
- return (new_acl);
-}
-}
-
ConstElementPtr
Resolver::updateConfig(ConstElementPtr config) {
LOG_DEBUG(resolver_logger, RESOLVER_DBG_CONFIG, RESOLVER_CONFIG_UPDATED)
@@ -616,8 +596,10 @@ Resolver::updateConfig(ConstElementPtr config) {
ConstElementPtr listenAddressesE(config->get("listen_on"));
AddressList listenAddresses(parseAddresses(listenAddressesE,
"listen_on"));
- shared_ptr<const ClientACL> query_acl(createQueryACL(
- config->get("query_acl")));
+ const ConstElementPtr query_acl_cfg(config->get("query_acl"));
+ const shared_ptr<const RequestACL> query_acl =
+ query_acl_cfg ? acl::dns::getRequestLoader().load(query_acl_cfg) :
+ shared_ptr<RequestACL>();
bool set_timeouts(false);
int qtimeout = impl_->query_timeout_;
int ctimeout = impl_->client_timeout_;
@@ -777,13 +759,13 @@ Resolver::getListenAddresses() const {
return (impl_->listen_);
}
-const Resolver::ClientACL&
+const RequestACL&
Resolver::getQueryACL() const {
return (impl_->getQueryACL());
}
void
-Resolver::setQueryACL(shared_ptr<const ClientACL> new_acl) {
+Resolver::setQueryACL(shared_ptr<const RequestACL> new_acl) {
if (!new_acl) {
isc_throw(InvalidParameter, "NULL pointer is passed to setQueryACL");
}
diff --git a/src/bin/resolver/resolver.h b/src/bin/resolver/resolver.h
index 9c78126..4b9c773 100644
--- a/src/bin/resolver/resolver.h
+++ b/src/bin/resolver/resolver.h
@@ -21,10 +21,9 @@
#include <boost/shared_ptr.hpp>
-#include <acl/acl.h>
-
#include <cc/data.h>
#include <config/ccsession.h>
+#include <acl/dns.h>
#include <dns/message.h>
#include <util/buffer.h>
@@ -41,12 +40,6 @@
#include <resolve/resolver_interface.h>
-namespace isc {
-namespace server_common {
-class Client;
-}
-}
-
class ResolverImpl;
/**
@@ -246,13 +239,10 @@ public:
*/
int getRetries() const;
- // Shortcut typedef used for query ACL.
- typedef isc::acl::ACL<isc::server_common::Client> ClientACL;
-
/// Get the query ACL.
///
/// \exception None
- const ClientACL& getQueryACL() const;
+ const isc::acl::dns::RequestACL& getQueryACL() const;
/// Set the new query ACL.
///
@@ -265,7 +255,8 @@ public:
/// \exception InvalidParameter The given pointer is NULL
///
/// \param new_acl The new ACL to replace the existing one.
- void setQueryACL(boost::shared_ptr<const ClientACL> new_acl);
+ void setQueryACL(boost::shared_ptr<const isc::acl::dns::RequestACL>
+ new_acl);
private:
ResolverImpl* impl_;
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
index 6c5be64..7930c52 100644
--- a/src/bin/resolver/resolver_messages.mes
+++ b/src/bin/resolver/resolver_messages.mes
@@ -16,151 +16,174 @@
# along with the resolver methods.
% RESOLVER_AXFR_TCP AXFR request received over TCP
-A debug message, the resolver received a NOTIFY message over TCP. The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP. Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
% RESOLVER_AXFR_UDP AXFR request received over UDP
-A debug message, the resolver received a NOTIFY message over UDP. The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP. Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
% RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small. The configuration
+update was abandoned and the parameters were not changed.
% RESOLVER_CONFIG_CHANNEL configuration channel created
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
% RESOLVER_CONFIG_ERROR error in configuration: %1
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error). The reason for the error, given as a parameter in the message,
-will give more details.
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error). The reason for the error, included
+in the message, will give more details. The configuration update is
+not applied and the resolver parameters were not changed.
% RESOLVER_CONFIG_LOADED configuration loaded
-A debug message, output when the resolver configuration has been successfully
-loaded.
+This is a debug message output when the resolver configuration has been
+successfully loaded.
% RESOLVER_CONFIG_UPDATED configuration updated: %1
-A debug message, the configuration has been updated with the specified
-information.
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
% RESOLVER_CREATED main resolver object created
-A debug message, output when the Resolver() object has been created.
+This is a debug message indicating that the main resolver object has
+been created.
% RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
+This is a debug message from the resolver listing the contents of a
+received DNS message.
% RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
% RESOLVER_FAILED resolver failed, reason: %1
-This is an error message output when an unhandled exception is caught by the
-resolver. All it can do is to shut down.
+This is an error message output when an unhandled exception is caught
+by the resolver. After this, the resolver will shut itself down.
+Please submit a bug report.
% RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address. If multiple addresses are
+specified, it will appear once for each address.
% RESOLVER_FORWARD_QUERY processing forward query
-The received query has passed all checks and is being forwarded to upstream
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
-A debug message noting that an exception occurred during the processing of
-a received packet. The packet has been dropped.
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet. The packet has
+been dropped.
% RESOLVER_IXFR IXFR request received
-The resolver received a NOTIFY message over TCP. The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone). Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
% RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small. The configuration
+update will not be applied.
% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the
-parsing of the body of the message failed due to some error (although
-the parsing of the header succeeded). The message parameters give a
-textual description of the problem and the RCODE returned.
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded). The message parameters give a textual description
+of the problem and the RCODE returned.
% RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration
-An error message indicating that the resolver configuration has specified a
-negative retry count. Only zero or positive values are valid.
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid. The
+configuration update was abandoned and the parameters were not changed.
% RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+This debug message is issued when resolver has received a DNS packet that
+was not IN (Internet) class. The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
% RESOLVER_NORMAL_QUERY processing normal query
-The received query has passed all checks and is being processed by the resolver.
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
% RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
-The resolver received a NOTIFY message. As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+The resolver has received a NOTIFY message. As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
% RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message. This is a malformed
-message, as a DNS query must contain only one question. The resolver will
-return a message to the sender with the RCODE set to FORMERR.
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message. This is a malformed message, as a DNS query must contain
+only one question. The resolver will return a message to the sender
+with the RCODE set to FORMERR.
% RESOLVER_NO_ROOT_ADDRESS no root addresses available
-A warning message during startup, indicates that no root addresses have been
-set. This may be because the resolver will get them from a priming query.
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set. This may be because the resolver will
+get them from a priming query.
% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded). The message parameters give
-a textual description of the problem and the RCODE returned.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
-This message is logged when a "print_message" command is received over the
-command channel.
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded). The message parameters give a textual
-description of the problem and the RCODE returned.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded). The message parameters
+give a textual description of the problem and the RCODE returned.
% RESOLVER_QUERY_SETUP query setup
-A debug message noting that the resolver is creating a RecursiveQuery object.
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
% RESOLVER_QUERY_SHUTDOWN query shutdown
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
% RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small. The configuration
+parameters were not changed.
% RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message
-A debug message indicating that the resolver has received a message. Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+This is a debug message indicating that the resolver has received a
+DNS message. Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
% RESOLVER_RECURSIVE running in recursive mode
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
% RESOLVER_SERVICE_CREATED service object created
-A debug message, output when the main service object (which handles the
-received queries) is created.
+This debug message is output when resolver creates the main service object
+(which handles the received queries).
% RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4
-A debug message, lists the parameters being set for the resolver. These are:
+This debug message lists the parameters being set for the resolver. These are:
query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers. Client timeout: the interval to resolver a query by
+to upstream servers. Client timeout: the interval to resolve a query by
a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
resolver gives up trying to resolve a query. Retry count: the number of times
the resolver will retry a query to an upstream server if it gets a timeout.
@@ -169,17 +192,18 @@ resolution of the client query might require a large number of queries to
upstream nameservers. Even if none of these queries timeout, the total time
taken to perform all the queries may exceed the client timeout. When this
happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache. However,
+with the resolution process; data received is added to the cache. However,
there comes a time - the lookup timeout - when even the resolver gives up.
At this point it will wait for pending upstream queries to complete or
timeout and drop the query.
% RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+This message gives the address of one of the root servers used by the
+resolver. It is output during startup and may appear multiple times,
+once for each root server address.
% RESOLVER_SHUTDOWN resolver shutdown complete
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
% RESOLVER_STARTED resolver started
This informational message is output by the resolver when all initialization
@@ -189,31 +213,36 @@ has been completed and it is entering its main loop.
An informational message, this is output when the resolver starts up.
% RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries. The packet
+has been ignored.
% RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes). It will return a message to the sender
-with the RCODE set to NOTIMP.
-
-% RESOLVER_SET_QUERY_ACL query ACL is configured
-A debug message that appears when a new query ACL is configured for the
-resolver.
-
-% RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4
-A debug message that indicates an incoming query is accepted in terms of
-the query ACL. The log message shows the query in the form of
-<query name>/<query type>/<query class>, and the client that sends the
-query in the form of <Source IP address>#<source port>.
-
-% RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4
-An informational message that indicates an incoming query is rejected
-in terms of the query ACL. This results in a response with an RCODE of
-REFUSED. See QUERYACCEPTED for the information given in the message.
-
-% RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4
-An informational message that indicates an incoming query is dropped
-in terms of the query ACL. Unlike the QUERYREJECTED case, the server does
-not return any response. See QUERYACCEPTED for the information given in
-the message.
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes). It will return
+a message to the sender with the RCODE set to NOTIMP.
+
+% RESOLVER_SET_QUERY_ACL query ACL is configured
+This debug message is generated when a new query ACL is configured for
+the resolver.
+
+% RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL. This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+
+% RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL. Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index c519617..97a2ba6 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -39,6 +39,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
+run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 9006301..c089041 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -43,6 +43,7 @@
using namespace std;
using boost::scoped_ptr;
using namespace isc::acl;
+using isc::acl::dns::RequestContext;
using namespace isc::data;
using namespace isc::testutils;
using namespace isc::asiodns;
@@ -57,19 +58,23 @@ protected:
DNSService dnss;
Resolver server;
scoped_ptr<const IOEndpoint> endpoint;
- scoped_ptr<const IOMessage> request;
+ scoped_ptr<const IOMessage> query_message;
scoped_ptr<const Client> client;
+ scoped_ptr<const RequestContext> request;
ResolverConfig() : dnss(ios, NULL, NULL, NULL) {
server.setDNSService(dnss);
server.setConfigured();
}
- const Client& createClient(const string& source_addr) {
+ const RequestContext& createRequest(const string& source_addr) {
endpoint.reset(IOEndpoint::create(IPPROTO_UDP, IOAddress(source_addr),
53210));
- request.reset(new IOMessage(NULL, 0, IOSocket::getDummyUDPSocket(),
- *endpoint));
- client.reset(new Client(*request));
- return (*client);
+ query_message.reset(new IOMessage(NULL, 0,
+ IOSocket::getDummyUDPSocket(),
+ *endpoint));
+ client.reset(new Client(*query_message));
+ request.reset(new RequestContext(client->getRequestSourceIPAddress(),
+ NULL));
+ return (*request);
}
void invalidTest(const string &JSON, const string& name);
};
@@ -100,14 +105,14 @@ TEST_F(ResolverConfig, forwardAddresses) {
TEST_F(ResolverConfig, forwardAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"forward_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"forward_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_TRUE(server.isForwarding());
@@ -127,14 +132,14 @@ TEST_F(ResolverConfig, forwardAddressConfig) {
TEST_F(ResolverConfig, rootAddressConfig) {
// Try putting there some address
- ElementPtr config(Element::fromJSON("{"
- "\"root_addresses\": ["
- " {"
- " \"address\": \"192.0.2.1\","
- " \"port\": 53"
- " }"
- "]"
- "}"));
+ ConstElementPtr config(Element::fromJSON("{"
+ "\"root_addresses\": ["
+ " {"
+ " \"address\": \"192.0.2.1\","
+ " \"port\": 53"
+ " }"
+ "]"
+ "}"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
ASSERT_EQ(1, server.getRootAddresses().size());
@@ -210,12 +215,12 @@ TEST_F(ResolverConfig, timeouts) {
}
TEST_F(ResolverConfig, timeoutsConfig) {
- ElementPtr config = Element::fromJSON("{"
- "\"timeout_query\": 1000,"
- "\"timeout_client\": 2000,"
- "\"timeout_lookup\": 3000,"
- "\"retries\": 4"
- "}");
+ ConstElementPtr config = Element::fromJSON("{"
+ "\"timeout_query\": 1000,"
+ "\"timeout_client\": 2000,"
+ "\"timeout_lookup\": 3000,"
+ "\"retries\": 4"
+ "}");
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
EXPECT_EQ(1000, server.getQueryTimeout());
@@ -253,51 +258,51 @@ TEST_F(ResolverConfig, invalidTimeoutsConfig) {
TEST_F(ResolverConfig, defaultQueryACL) {
// If no configuration is loaded, the default ACL should reject everything.
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
EXPECT_EQ(REJECT, server.getQueryACL().execute(
- createClient("2001:db8::1")));
+ createRequest("2001:db8::1")));
// The following would be allowed if the server had loaded the default
// configuration from the spec file. In this context it should not have
// happened, and they should be rejected just like the above cases.
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("127.0.0.1")));
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("::1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("127.0.0.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("::1")));
}
TEST_F(ResolverConfig, emptyQueryACL) {
// Explicitly configured empty ACL should have the same effect.
- ElementPtr config(Element::fromJSON("{ \"query_acl\": [] }"));
+ ConstElementPtr config(Element::fromJSON("{ \"query_acl\": [] }"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
EXPECT_EQ(REJECT, server.getQueryACL().execute(
- createClient("2001:db8::1")));
+ createRequest("2001:db8::1")));
}
TEST_F(ResolverConfig, queryACLIPv4) {
// A simple "accept" query for a specific IPv4 address
- ElementPtr config(Element::fromJSON(
- "{ \"query_acl\": "
- " [ {\"action\": \"ACCEPT\","
- " \"from\": \"192.0.2.1\"} ] }"));
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"} ] }"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
- EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
EXPECT_EQ(REJECT, server.getQueryACL().execute(
- createClient("2001:db8::1")));
+ createRequest("2001:db8::1")));
}
TEST_F(ResolverConfig, queryACLIPv6) {
// same for IPv6
- ElementPtr config(Element::fromJSON(
- "{ \"query_acl\": "
- " [ {\"action\": \"ACCEPT\","
- " \"from\": \"2001:db8::1\"} ] }"));
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"2001:db8::1\"} ] }"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.1")));
EXPECT_EQ(ACCEPT, server.getQueryACL().execute(
- createClient("2001:db8::1")));
+ createRequest("2001:db8::1")));
}
TEST_F(ResolverConfig, multiEntryACL) {
@@ -306,25 +311,26 @@ TEST_F(ResolverConfig, multiEntryACL) {
// as it should have been tested in the underlying ACL module. All we
// have to do to check is a reasonably complicated ACL configuration is
// loaded as expected.
- ElementPtr config(Element::fromJSON(
- "{ \"query_acl\": "
- " [ {\"action\": \"ACCEPT\","
- " \"from\": \"192.0.2.1\"},"
- " {\"action\": \"REJECT\","
- " \"from\": \"192.0.2.0/24\"},"
- " {\"action\": \"DROP\","
- " \"from\": \"2001:db8::1\"},"
- "] }"));
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"REJECT\","
+ " \"from\": \"192.0.2.0/24\"},"
+ " {\"action\": \"DROP\","
+ " \"from\": \"2001:db8::1\"},"
+ "] }"));
ConstElementPtr result(server.updateConfig(config));
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
- EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createClient("192.0.2.1")));
- EXPECT_EQ(REJECT, server.getQueryACL().execute(createClient("192.0.2.2")));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+ EXPECT_EQ(REJECT, server.getQueryACL().execute(createRequest("192.0.2.2")));
EXPECT_EQ(DROP, server.getQueryACL().execute(
- createClient("2001:db8::1")));
+ createRequest("2001:db8::1")));
EXPECT_EQ(REJECT, server.getQueryACL().execute(
- createClient("2001:db8::2"))); // match the default rule
+ createRequest("2001:db8::2"))); // match the default rule
}
+
int
getResultCode(ConstElementPtr result) {
int rcode;
@@ -332,6 +338,22 @@ getResultCode(ConstElementPtr result) {
return (rcode);
}
+TEST_F(ResolverConfig, queryACLActionOnly) {
+ // "action only" rule will be accepted by the loader, which can
+ // effectively change the default action.
+ ConstElementPtr config(Element::fromJSON(
+ "{ \"query_acl\": "
+ " [ {\"action\": \"ACCEPT\","
+ " \"from\": \"192.0.2.1\"},"
+ " {\"action\": \"DROP\"} ] }"));
+ EXPECT_EQ(0, getResultCode(server.updateConfig(config)));
+ EXPECT_EQ(ACCEPT, server.getQueryACL().execute(createRequest("192.0.2.1")));
+
+ // We reject non matching queries by default, but the last resort
+ // rule should have changed the action in that case to "DROP".
+ EXPECT_EQ(DROP, server.getQueryACL().execute(createRequest("192.0.2.2")));
+}
+
TEST_F(ResolverConfig, badQueryACL) {
// Most of these cases shouldn't happen in practice because the syntax
// check should be performed before updateConfig(). But we check at
@@ -346,10 +368,6 @@ TEST_F(ResolverConfig, badQueryACL) {
server.updateConfig(
Element::fromJSON("{ \"query_acl\":"
" [ {\"from\": \"192.0.2.1\"} ] }"))));
- EXPECT_EQ(1, getResultCode(
- server.updateConfig(
- Element::fromJSON("{ \"query_acl\":"
- " [ {\"action\": \"DROP\"} ] }"))));
// invalid "action"
EXPECT_EQ(1, getResultCode(
server.updateConfig(
@@ -361,7 +379,6 @@ TEST_F(ResolverConfig, badQueryACL) {
Element::fromJSON("{ \"query_acl\":"
" [ {\"action\": \"BADACTION\","
" \"from\": \"192.0.2.1\"}]}"))));
-
// invalid "from"
EXPECT_EQ(1, getResultCode(
server.updateConfig(
diff --git a/src/bin/resolver/tests/resolver_unittest.cc b/src/bin/resolver/tests/resolver_unittest.cc
index 9bcc261..71474dd 100644
--- a/src/bin/resolver/tests/resolver_unittest.cc
+++ b/src/bin/resolver/tests/resolver_unittest.cc
@@ -27,6 +27,7 @@
using namespace std;
using namespace isc::dns;
using namespace isc::data;
+using isc::acl::dns::RequestACL;
using namespace isc::testutils;
using isc::UnitTestUtil;
@@ -156,8 +157,7 @@ TEST_F(ResolverTest, notifyFail) {
TEST_F(ResolverTest, setQueryACL) {
// valid cases are tested through other tests. We only explicitly check
// an invalid case: passing a NULL shared pointer.
- EXPECT_THROW(server.setQueryACL(
- boost::shared_ptr<const Resolver::ClientACL>()),
+ EXPECT_THROW(server.setQueryACL(boost::shared_ptr<const RequestACL>()),
isc::InvalidParameter);
}
diff --git a/src/bin/sockcreator/README b/src/bin/sockcreator/README
index 4dbbee7..e142d19 100644
--- a/src/bin/sockcreator/README
+++ b/src/bin/sockcreator/README
@@ -3,7 +3,7 @@ The socket creator
The only thing we need higher rights than standard user is binding sockets to
ports lower than 1024. So we will have a separate process that keeps the
-rights, while the rests drop them for security reasons.
+rights, while the rest drops them for security reasons.
This process is the socket creator. Its goal is to be as simple as possible
and to contain as little code as possible to minimise the amount of code
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index c8b18c9..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,16 +5,25 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
CLEANFILES = b10-stats stats.pyc
CLEANFILES += b10-stats-httpd stats_httpd.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
+EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
if ENABLE_MAN
@@ -26,12 +35,20 @@ b10-stats-httpd.8: b10-stats-httpd.xml
endif
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py : stats_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py : stats_httpd_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_httpd_messages.mes
+
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-stats: stats.py
+b10-stats: stats.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats.py >$@
chmod a+x $@
-b10-stats-httpd: stats_httpd.py
+b10-stats-httpd: stats_httpd.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
chmod a+x $@
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
.PP
\fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to serve requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
\fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
\fBb10\-stats\fR
for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
by the BIND 10 boss process (<command>bind10</command>) and eventually
exited by it. The server is intended to be server requests by HTTP
clients like web browsers and third-party modules. When the server is
- asked, it requests BIND 10 statistics data from
+ asked, it requests BIND 10 statistics data or its schema from
<command>b10-stats</command>, and it sends the data back in Python
dictionary format and the server converts it into XML format. The server
sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
<refsect1>
<title>CONFIGURATION AND COMMANDS</title>
<para>
- The configurable setting in
+ The configurable setting in
<filename>stats-httpd.spec</filename> is:
</para>
<variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index f69e4d3..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
'\" t
.\" Title: b10-stats
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: August 11, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -45,9 +36,9 @@ with other modules like
\fBb10\-auth\fR
and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&.
\fBb10\-stats\fR
-invokes "sendstats" command for
+invokes an internal command for
\fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
\fBbind10\fR\&.
.SH "OPTIONS"
.PP
@@ -59,6 +50,84 @@ This
\fBb10\-stats\fR
switches to verbose mode\&. It sends verbose messages to STDOUT\&.
.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shutdown the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
+The current date and time represented in seconds since UNIX epoch (1970\-01\-01T0 0:00:00Z) with precision (delimited with a period) up to one hundred thousandth of second\&.
+.RE
+.PP
+See other manual pages for explanations of the statistics that are tracked by
+\fBb10\-stats\fR\&.
.SH "FILES"
.PP
/usr/local/share/bind10\-devel/stats\&.spec
@@ -66,10 +135,6 @@ switches to verbose mode\&. It sends verbose messages to STDOUT\&.
\fBb10\-stats\fR\&. It contains commands for
\fBb10\-stats\fR\&. They can be invoked via
bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
.SH "SEE ALSO"
.PP
@@ -82,7 +147,7 @@ BIND 10 Guide\&.
.PP
The
\fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
.SH "COPYRIGHT"
.br
Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index f0c472d..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>Oct 15, 2010</date>
+ <date>August 11, 2011</date>
</refentryinfo>
<refmeta>
@@ -64,9 +64,10 @@
send stats data to stats module independently from
implementation of stats module, so the frequency of sending data
may not be constant. Stats module collects data and aggregates
- it. <command>b10-stats</command> invokes "sendstats" command
+ it. <command>b10-stats</command> invokes an internal command
for <command>bind10</command> after its initial starting because it's
sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
</para>
</refsect1>
@@ -87,6 +88,123 @@
</refsect1>
<refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The <command>b10-stats</command> command does not have any
+ configurable settings.
+ </para>
+
+<!-- TODO: formating -->
+ <para>
+ The configuration commands are:
+ </para>
+
+ <para>
+<!-- TODO: remove is removed in trac930 -->
+ <command>remove</command> removes the named statistics name and data.
+ </para>
+
+ <para>
+<!-- TODO: reset is removed in trac930 -->
+ <command>reset</command> will reset all statistics data to
+ default values except for constant names.
+ This may re-add previously removed statistics names.
+ </para>
+
+ <para>
+ <command>set</command>
+<!-- TODO: document this -->
+ </para>
+
+ <para>
+ <command>show</command> will send the statistics data
+ in JSON format.
+ By default, it outputs all the statistics data it has collected.
+ An optional item name may be specified to receive individual output.
+ </para>
+
+<!-- TODO: document showschema -->
+
+ <para>
+ <command>shutdown</command> will shutdown the
+ <command>b10-stats</command> process.
+ (Note that the <command>bind10</command> parent may restart it.)
+ </para>
+
+ <para>
+ <command>status</command> simply indicates that the daemon is
+ running.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The <command>b10-stats</command> daemon contains these statistics:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+ <listitem><simpara>The latest report date and time in
+ ISO 8601 format.</simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.boot_time</term>
+ <listitem><simpara>The date and time when this daemon was
+ started in ISO 8601 format.
+ This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.last_update_time</term>
+ <listitem><simpara>The date and time (in ISO 8601 format)
+ when this daemon last received data from another component.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.lname</term>
+ <listitem><simpara>This is the name used for the
+ <command>b10-msgq</command> command-control channel.
+ (This is a constant which can't be reset except by restarting
+ <command>b10-stats</command>.)
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.start_time</term>
+ <listitem><simpara>This is the date and time (in ISO 8601 format)
+ when this daemon started collecting data.
+ </simpara></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>stats.timestamp</term>
+ <listitem><simpara>The current date and time represented in
+ seconds since UNIX epoch (1970-01-01T0 0:00:00Z) with
+ precision (delimited with a period) up to
+ one hundred thousandth of second.</simpara></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+  <para>
+    See other manual pages for explanations of the statistics
+    that are tracked by <command>b10-stats</command>.
+  </para>
+
+ </refsect1>
+
+ <refsect1>
<title>FILES</title>
<para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -95,12 +213,6 @@
invoked
via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
</refsect1>
<refsect1>
@@ -126,7 +238,7 @@
<title>HISTORY</title>
<para>
The <command>b10-stats</command> daemon was initially designed
- and implemented by Naoki Kambe of JPRS in Oct 2010.
+ and implemented by Naoki Kambe of JPRS in October 2010.
</para>
</refsect1>
</refentry><!--
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index 01ffdc6..a1f6406 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -44,6 +44,7 @@ td.title {
<h1>BIND 10 Statistics</h1>
<table>
<tr>
+ <th>Owner</th>
<th>Title</th>
<th>Value</th>
</tr>
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
deleted file mode 100644
index 37e9c1a..0000000
--- a/src/bin/stats/stats-schema.spec
+++ /dev/null
@@ -1,87 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Statistics data schema",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
- "item_format": "second"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": []
- }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
old mode 100644
new mode 100755
index 969676e..da00818
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,399 +15,401 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+Statistics daemon in BIND 10
+
+"""
import sys; sys.path.append ('@@PYTHONPATH@@')
import os
-import signal
-import select
from time import time, strftime, gmtime
from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
-# for setproctitle
+import isc
import isc.util.process
+import isc.log
+from isc.log_messages.stats_messages import *
+
+isc.log.init("b10-stats")
+logger = isc.log.Logger("stats")
+
+# Some constants for debug levels, these should be removed when we
+# have #1074
+DBG_STATS_MESSAGING = 30
+
+# This is for boot_time of Stats
+_BASETIME = gmtime()
+
+# for setproctitle
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats"
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
- BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+ SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+ SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
-class Singleton(type):
+def get_timestamp():
"""
- A abstract class of singleton pattern
+ get current timestamp
"""
- # Because of singleton pattern:
- # At the beginning of coding, one UNIX domain socket is needed
- # for config manager, another socket is needed for stats module,
- # then stats module might need two sockets. So I adopted the
- # singleton pattern because I avoid creating multiple sockets in
- # one stats module. But in the initial version stats module
- # reports only via bindctl, so just one socket is needed. To use
- # the singleton pattern is not important now. :(
+ return time()
- def __init__(self, *args, **kwargs):
- type.__init__(self, *args, **kwargs)
- self._instances = {}
+def get_datetime(gmt=None):
+ """
+ get current datetime
+ """
+ if not gmt: gmt = gmtime()
+ return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
- def __call__(self, *args, **kwargs):
- if args not in self._instances:
- self._instances[args]={}
- kw = tuple(kwargs.items())
- if kw not in self._instances[args]:
- self._instances[args][kw] = type.__call__(self, *args, **kwargs)
- return self._instances[args][kw]
+def get_spec_defaults(spec):
+ """
+ extracts the default values of the items from spec specified in
+ arg, and returns the dict-type variable which is a set of the item
+ names and the default values
+ """
+ if type(spec) is not list: return {}
+ def _get_spec_defaults(spec):
+ item_type = spec['item_type']
+ if item_type == "integer":
+ return int(spec.get('item_default', 0))
+ elif item_type == "real":
+ return float(spec.get('item_default', 0.0))
+ elif item_type == "boolean":
+ return bool(spec.get('item_default', False))
+ elif item_type == "string":
+ return str(spec.get('item_default', ""))
+ elif item_type == "list":
+ return spec.get(
+ "item_default",
+ [ _get_spec_defaults(spec["list_item_spec"]) ])
+ elif item_type == "map":
+ return spec.get(
+ "item_default",
+ dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+ else:
+ return spec.get("item_default", None)
+ return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
class Callback():
"""
A Callback handler class
"""
- def __init__(self, name=None, callback=None, args=(), kwargs={}):
- self.name = name
- self.callback = callback
+ def __init__(self, command=None, args=(), kwargs={}):
+ self.command = command
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
- if not args:
- args = self.args
- if not kwargs:
- kwargs = self.kwargs
- if self.callback:
- return self.callback(*args, **kwargs)
-
-class Subject():
- """
- A abstract subject class of observer pattern
- """
- # Because of observer pattern:
- # In the initial release, I'm also sure that observer pattern
- # isn't definitely needed because the interface between gathering
- # and reporting statistics data is single. However in the future
- # release, the interfaces may be multiple, that is, multiple
- # listeners may be needed. For example, one interface, which
- # stats module has, is for between ''config manager'' and stats
- # module, another interface is for between ''HTTP server'' and
- # stats module, and one more interface is for between ''SNMP
- # server'' and stats module. So by considering that stats module
- # needs multiple interfaces in the future release, I adopted the
- # observer pattern in stats module. But I don't have concrete
- # ideas in case of multiple listener currently.
-
- def __init__(self):
- self._listeners = []
-
- def attach(self, listener):
- if not listener in self._listeners:
- self._listeners.append(listener)
+ if not args: args = self.args
+ if not kwargs: kwargs = self.kwargs
+ if self.command: return self.command(*args, **kwargs)
- def detach(self, listener):
- try:
- self._listeners.remove(listener)
- except ValueError:
- pass
+class StatsError(Exception):
+ """Exception class for Stats class"""
+ pass
- def notify(self, event, modifier=None):
- for listener in self._listeners:
- if modifier != listener:
- listener.update(event)
-
-class Listener():
+class Stats:
"""
- A abstract listener class of observer pattern
+ Main class of stats module
"""
- def __init__(self, subject):
- self.subject = subject
- self.subject.attach(self)
- self.events = {}
-
- def update(self, name):
- if name in self.events:
- callback = self.events[name]
- return callback()
-
- def add_event(self, event):
- self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
- """
- A concrete subject class which creates CC session object
- """
- def __init__(self, session=None, verbose=False):
- Subject.__init__(self)
- self.verbose = verbose
- self.session=session
- self.running = False
-
- def start(self):
- self.running = True
- self.notify('start')
-
- def stop(self):
+ def __init__(self):
self.running = False
- self.notify('stop')
-
- def check(self):
- self.notify('check')
-
-class CCSessionListener(Listener):
- """
- A concrete listener class which creates SessionSubject object and
- ModuleCCSession object
- """
- def __init__(self, subject, verbose=False):
- Listener.__init__(self, subject)
- self.verbose = verbose
- self.session = subject.session
- self.boot_time = get_datetime()
-
# create ModuleCCSession object
- self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- self.session)
-
- self.session = self.subject.session = self.cc_session._session
-
- # initialize internal data
- self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # add event handler invoked via SessionSubject object
- self.add_event(Callback('start', self.start))
- self.add_event(Callback('stop', self.stop))
- self.add_event(Callback('check', self.check))
- # don't add 'command_' suffix to the special commands in
- # order to prevent executing internal command via bindctl
-
+ self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.cc_session = self.mccs._session
+ # get module spec
+ self.module_name = self.mccs.get_module_spec().get_module_name()
+ self.modules = {}
+ self.statistics_data = {}
# get commands spec
- self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+ self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
# add event handler related command_handler of ModuleCCSession
- # invoked via bindctl
+ self.callbacks = {}
for cmd in self.commands_spec:
+ # add prefix "command_"
+ name = "command_" + cmd["command_name"]
try:
- # add prefix "command_"
- name = "command_" + cmd["command_name"]
callback = getattr(self, name)
- kwargs = self.initialize_data(cmd["command_args"])
- self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
- except AttributeError as ae:
- sys.stderr.write("[b10-stats] Caught undefined command while parsing spec file: "
- +str(cmd["command_name"])+"\n")
+ kwargs = get_spec_defaults(cmd["command_args"])
+ self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+ except AttributeError:
+ raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ self.mccs.start()
def start(self):
"""
- start the cc chanel
+ Start stats module
"""
- # set initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
- self.cc_session.start()
+ self.running = True
+ logger.info(STATS_STARTING)
+
# request Bob to send statistics data
- if self.verbose:
- sys.stdout.write("[b10-stats] request Bob to send statistics data\n")
- cmd = isc.config.ccsession.create_command("sendstats", None)
- seq = self.session.group_sendmsg(cmd, 'Boss')
- self.session.group_recvmsg(True, seq)
+ logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
+ cmd = isc.config.ccsession.create_command("getstats", None)
+ seq = self.cc_session.group_sendmsg(cmd, 'Boss')
+ try:
+ answer, env = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ rcode, args = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ errors = self.update_statistics_data(
+ args["owner"], **args["data"])
+ if errors:
+ raise StatsError("boss spec file is incorrect: "
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name,
+ last_update_time=get_datetime())
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ except isc.cc.session.SessionTimeout:
+ pass
- def stop(self):
- """
- stop the cc chanel
- """
- return self.cc_session.close()
+ # initialized Statistics data
+ errors = self.update_statistics_data(
+ self.module_name,
+ lname=self.cc_session.lname,
+ boot_time=get_datetime(_BASETIME)
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
- def check(self):
- """
- check the cc chanel
- """
- return self.cc_session.check_command(False)
+ while self.running:
+ self.mccs.check_command(False)
def config_handler(self, new_config):
"""
handle a configure from the cc channel
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] newconfig received: "+str(new_config)+"\n")
-
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
+ new_config)
# do nothing currently
- return create_answer(0)
+ return isc.config.create_answer(0)
- def command_handler(self, command, *args, **kwargs):
+ def command_handler(self, command, kwargs):
"""
handle commands from the cc channel
"""
- # add 'command_' suffix in order to executing command via bindctl
name = 'command_' + command
-
- if name in self.events:
- event = self.events[name]
- return event(*args, **kwargs)
+ if name in self.callbacks:
+ callback = self.callbacks[name]
+ if kwargs:
+ return callback(**kwargs)
+ else:
+ return callback()
else:
- return self.command_unknown(command, args)
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+ return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
- def command_shutdown(self, args):
+ def update_modules(self):
"""
- handle shutdown command
+ updates information of each module. This method gets each
+ module's information from the config manager and sets it into
+ self.modules. If its getting from the config manager fails, it
+ raises StatsError.
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'shutdown' command received\n")
- self.subject.running = False
- return create_answer(0)
+ modules = {}
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command(
+ isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+ 'ConfigManager')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ for mod in value:
+ spec = { "module_name" : mod }
+ if value[mod] and type(value[mod]) is list:
+ spec["statistics"] = value[mod]
+ modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+ else:
+ raise StatsError("Updating module spec fails: " + str(value))
+ modules[self.module_name] = self.mccs.get_module_spec()
+ self.modules = modules
- def command_set(self, args, stats_data={}):
+ def get_statistics_data(self, owner=None, name=None):
"""
- handle set command
+ returns statistics data which stats module has of each
+ module. If it can't find specified statistics data, it raises
+ StatsError.
"""
- # 'args' must be dictionary type
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
-
- return create_answer(0)
+ self.update_statistics_data()
+ if owner and name:
+ try:
+ return self.statistics_data[owner][name]
+ except KeyError:
+ pass
+ elif owner:
+ try:
+ return self.statistics_data[owner]
+ except KeyError:
+ pass
+ elif name:
+ pass
+ else:
+ return self.statistics_data
+ raise StatsError("No statistics data found: "
+ + "owner: " + str(owner) + ", "
+ + "name: " + str(name))
- def command_remove(self, args, stats_item_name=''):
+ def update_statistics_data(self, owner=None, **data):
"""
- handle remove command
+ change statistics data of specified module into specified
+ data. It updates information of each module first, and it
+ updates statistics data. If specified data is invalid for
+ statistics spec of specified owner, it returns a list of error
+ messages. If there is no error or if neither owner nor data is
+ specified in args, it returns None.
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'remove' command received, args: "+str(args)+"\n")
-
- # 'args' must be dictionary type
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
-
- # just remove one item
- self.stats_data.pop(stats_item_name)
-
- return create_answer(0)
-
- def command_show(self, args, stats_item_name=''):
+ self.update_modules()
+ statistics_data = {}
+ for (name, module) in self.modules.items():
+ value = get_spec_defaults(module.get_statistics_spec())
+ if module.validate_statistics(True, value):
+ statistics_data[name] = value
+ for (name, value) in self.statistics_data.items():
+ if name in statistics_data:
+ statistics_data[name].update(value)
+ else:
+ statistics_data[name] = value
+ self.statistics_data = statistics_data
+ if owner and data:
+ errors = []
+ try:
+ if self.modules[owner].validate_statistics(False, data, errors):
+ self.statistics_data[owner].update(data)
+ return
+ except KeyError:
+ errors.append("unknown module name: " + str(owner))
+ return errors
+
+ def command_status(self):
"""
- handle show command
+ handle status command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'show' command received, args: "+str(args)+"\n")
-
- # always overwrite 'report_time' and 'stats.timestamp'
- # if "show" command invoked
- self.stats_data['report_time'] = get_datetime()
- self.stats_data['stats.timestamp'] = get_timestamp()
-
- # if with args
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
- return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+ return isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")")
- return create_answer(0, self.stats_data)
-
- def command_reset(self, args):
+ def command_shutdown(self):
"""
- handle reset command
+ handle shutdown command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'reset' command received\n")
-
- # re-initialize internal variables
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # reset initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
-
- return create_answer(0)
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+ self.running = False
+ return isc.config.create_answer(0)
- def command_status(self, args):
+ def command_show(self, owner=None, name=None):
"""
- handle status command
+ handle show command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] 'status' command received\n")
- # just return "I'm alive."
- return create_answer(0, "I'm alive.")
-
- def command_unknown(self, command, args):
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
+ errors = self.update_statistics_data(
+ self.module_name,
+ timestamp=get_timestamp(),
+ report_time=get_datetime()
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ try:
+ return isc.config.create_answer(
+ 0, self.get_statistics_data(owner, name))
+ except StatsError:
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
+
+ def command_showschema(self, owner=None, name=None):
"""
- handle an unknown command
+ handle showschema command
"""
- if self.verbose:
- sys.stdout.write("[b10-stats] Unknown command received: '"
- + str(command) + "'\n")
- return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+ self.update_modules()
+ schema = {}
+ schema_byname = {}
+ for mod in self.modules:
+ spec = self.modules[mod].get_statistics_spec()
+ schema_byname[mod] = {}
+ if spec:
+ schema[mod] = spec
+ for item in spec:
+ schema_byname[mod][item['item_name']] = item
+ if owner:
+ try:
+ if name:
+ return isc.config.create_answer(0, schema_byname[owner][name])
+ else:
+ return isc.config.create_answer(0, schema[owner])
+ except KeyError:
+ pass
+ else:
+ if name:
+ return isc.config.create_answer(1, "module name is not specified")
+ else:
+ return isc.config.create_answer(0, schema)
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
- def initialize_data(self, spec):
+ def command_set(self, owner, data):
"""
- initialize stats data
+ handle set command
"""
- def __get_init_val(spec):
- if spec['item_type'] == 'null':
- return None
- elif spec['item_type'] == 'boolean':
- return bool(spec.get('item_default', False))
- elif spec['item_type'] == 'string':
- return str(spec.get('item_default', ''))
- elif spec['item_type'] in set(['number', 'integer']):
- return int(spec.get('item_default', 0))
- elif spec['item_type'] in set(['float', 'double', 'real']):
- return float(spec.get('item_default', 0.0))
- elif spec['item_type'] in set(['list', 'array']):
- return spec.get('item_default',
- [ __get_init_val(s) for s in spec['list_item_spec'] ])
- elif spec['item_type'] in set(['map', 'object']):
- return spec.get('item_default',
- dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
- else:
- return spec.get('item_default')
- return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+ errors = self.update_statistics_data(owner, **data)
+ if errors:
+ return isc.config.create_answer(
+ 1, "errors while setting statistics data: " \
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name, last_update_time=get_datetime() )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ return isc.config.create_answer(0)
-def get_timestamp():
- """
- get current timestamp
- """
- return time()
-
-def get_datetime():
- """
- get current datetime
- """
- return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
try:
parser = OptionParser()
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ parser.add_option(
+ "-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
(options, args) = parser.parse_args()
- subject = SessionSubject(session=session, verbose=options.verbose)
- listener = CCSessionListener(subject, verbose=options.verbose)
- subject.start()
- while subject.running:
- subject.check()
- subject.stop()
-
- except OptionValueError:
- sys.stderr.write("[b10-stats] Error parsing options\n")
- except SessionError as se:
- sys.stderr.write("[b10-stats] Error creating Stats module, "
- + "is the command channel daemon running?\n")
+ if options.verbose:
+ isc.log.init("b10-stats", "DEBUG", 99)
+ stats = Stats()
+ stats.start()
+ except OptionValueError as ove:
+ logger.fatal(STATS_BAD_OPTION_VALUE, ove)
+ sys.exit(1)
+ except isc.cc.session.SessionError as se:
+ logger.fatal(STATS_CC_SESSION_ERROR, se)
+ sys.exit(1)
+ except StatsError as se:
+ logger.fatal(STATS_START_ERROR, se)
+ sys.exit(1)
except KeyboardInterrupt as kie:
- sys.stderr.write("[b10-stats] Interrupted, exiting\n")
-
-if __name__ == "__main__":
- main()
+ logger.info(STATS_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 25f6b54..e716b62 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -6,55 +6,119 @@
"commands": [
{
"command_name": "status",
- "command_description": "identify whether stats module is alive or not",
+ "command_description": "Show status of the stats daemon",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
"command_args": []
},
{
"command_name": "show",
- "command_description": "show the specified/all statistics data",
+ "command_description": "Show the specified/all statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
"item_type": "string",
"item_optional": true,
- "item_default": ""
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
+ "command_name": "showschema",
+ "command_description": "Show the specified/all statistics schema",
"command_args": [
{
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
"item_type": "string",
"item_optional": false,
- "item_default": ""
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "item_description": "statistics data set of the owner",
+ "map_item_spec": []
}
]
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "report_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Report time",
+ "item_description": "A date time when stats module reports",
+ "item_format": "date-time"
},
{
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+ "item_format": "date-time"
},
{
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
+ "item_name": "last_update_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Last update time",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "timestamp",
+ "item_type": "real",
+ "item_optional": false,
+ "item_default": 0.0,
+ "item_title": "Timestamp",
+ "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+ },
+ {
+ "item_name": "lname",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "A localname of stats module given via CC protocol"
}
]
}
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100755
new mode 100644
index a6fd066..596870a
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -34,6 +34,17 @@ import isc.cc
import isc.config
import isc.util.process
+import isc.log
+from isc.log_messages.stats_httpd_messages import *
+
+isc.log.init("b10-stats-httpd")
+logger = isc.log.Logger("stats-httpd")
+
+# Some constants for debug levels, these should be removed when we
+# have #1074
+DBG_STATHTTPD_INIT = 10
+DBG_STATHTTPD_MESSAGING = 30
+
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
@@ -46,7 +57,6 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -58,7 +68,6 @@ XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
# Assign this process name
isc.util.process.rename()
@@ -98,9 +107,7 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
return None
except StatsHttpdError as err:
self.send_error(500)
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s\n" % err)
+ logger.error(STATHTTPD_SERVER_ERROR, err)
return None
else:
self.send_response(200)
@@ -109,15 +116,6 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
self.end_headers()
return body
- def log_message(self, format, *args):
- """Change the default log format"""
- if self.server.verbose:
- self.server.log_writer(
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (self.address_string(),
- self.log_date_time_string(),
- format%args))
-
class HttpServerError(Exception):
"""Exception class for HttpServer class. It is intended to be
passed from the HttpServer object to the StatsHttpd object."""
@@ -134,13 +132,12 @@ class HttpServer(http.server.HTTPServer):
sys.stderr.write. They are intended to be referred by HttpHandler
object."""
def __init__(self, server_address, handler,
- xml_handler, xsd_handler, xsl_handler, log_writer, verbose=False):
+ xml_handler, xsd_handler, xsl_handler, log_writer):
self.server_address = server_address
self.xml_handler = xml_handler
self.xsd_handler = xsd_handler
self.xsl_handler = xsl_handler
self.log_writer = log_writer
- self.verbose = verbose
http.server.HTTPServer.__init__(self, server_address, handler)
class StatsHttpdError(Exception):
@@ -154,37 +151,33 @@ class StatsHttpd:
statistics module. It handles HTTP requests, and command channel
and config channel CC session. It uses select.select function
while waiting for clients requests."""
- def __init__(self, verbose=False):
- self.verbose = verbose
+ def __init__(self):
self.running = False
self.poll_intval = 0.5
self.write_log = sys.stderr.write
self.mccs = None
self.httpd = []
self.open_mccs()
+ self.config = {}
self.load_config()
- self.load_templates()
+ self.http_addrs = []
+ self.mccs.start()
self.open_httpd()
def open_mccs(self):
"""Opens a ModuleCCSession object"""
# create ModuleCCSession
- if self.verbose:
- self.write_log("[b10-stats-httpd] Starting CC Session\n")
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_STARTING_CC_SESSION)
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
- # read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
- self.stats_config_spec = self.stats_module_spec.get_config_spec()
- self.stats_module_name = self.stats_module_spec.get_module_name()
def close_mccs(self):
"""Closes a ModuleCCSession object"""
if self.mccs is None:
return
- if self.verbose:
- self.write_log("[b10-stats-httpd] Closing CC Session\n")
+
+ logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_CLOSING_CC_SESSION)
self.mccs.close()
self.mccs = None
@@ -192,18 +185,19 @@ class StatsHttpd:
"""Loads configuration from spec file or new configuration
from the config manager"""
# load config
- if len(new_config) > 0:
- self.config.update(new_config)
- else:
- self.config = DEFAULT_CONFIG
- self.config.update(
- dict([
- (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
- for itm in self.mccs.get_module_spec().get_config_spec()
- ])
- )
+ if len(self.config) == 0:
+ self.config = dict([
+ (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+ for itm in self.mccs.get_module_spec().get_config_spec()
+ ])
+ self.config.update(new_config)
# set addresses and ports for HTTP
- self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+ addrs = []
+ if 'listen_on' in self.config:
+ for cf in self.config['listen_on']:
+ if 'address' in cf and 'port' in cf:
+ addrs.append((cf['address'], cf['port']))
+ self.http_addrs = addrs
def open_httpd(self):
"""Opens sockets for HTTP. Iterating each HTTP address to be
@@ -211,51 +205,44 @@ class StatsHttpd:
for addr in self.http_addrs:
self.httpd.append(self._open_httpd(addr))
- def _open_httpd(self, server_address, address_family=None):
+ def _open_httpd(self, server_address):
+ httpd = None
try:
- # try IPv6 at first
- if address_family is not None:
- HttpServer.address_family = address_family
- elif socket.has_ipv6:
- HttpServer.address_family = socket.AF_INET6
+ # get address family for the server_address before
+ # creating HttpServer object. If a specified address is
+ # not numerical, gaierror may be thrown.
+ address_family = socket.getaddrinfo(
+ server_address[0], server_address[1], 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+ )[0][0]
+ HttpServer.address_family = address_family
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
- self.write_log, self.verbose)
+ self.write_log)
+ logger.info(STATHTTPD_STARTED, server_address[0],
+ server_address[1])
+ return httpd
except (socket.gaierror, socket.error,
OverflowError, TypeError) as err:
- # try IPv4 next
- if HttpServer.address_family == socket.AF_INET6:
- httpd = self._open_httpd(server_address, socket.AF_INET)
- else:
- raise HttpServerError(
- "Invalid address %s, port %s: %s: %s" %
- (server_address[0], server_address[1],
- err.__class__.__name__, err))
- else:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Started on address %s, port %s\n" %
- server_address)
- return httpd
+ if httpd:
+ httpd.server_close()
+ raise HttpServerError(
+ "Invalid address %s, port %s: %s: %s" %
+ (server_address[0], server_address[1],
+ err.__class__.__name__, err))
def close_httpd(self):
"""Closes sockets for HTTP"""
- if len(self.httpd) == 0:
- return
- for ht in self.httpd:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Closing address %s, port %s\n" %
- (ht.server_address[0], ht.server_address[1])
- )
+ while len(self.httpd)>0:
+ ht = self.httpd.pop()
+ logger.info(STATHTTPD_CLOSING, ht.server_address[0],
+ ht.server_address[1])
ht.server_close()
- self.httpd = []
def start(self):
"""Starts StatsHttpd objects to run. Waiting for client
requests by using select.select functions"""
- self.mccs.start()
self.running = True
while self.running:
try:
@@ -285,10 +272,10 @@ class StatsHttpd:
def stop(self):
"""Stops the running StatsHttpd objects. Closes CC session and
HTTP handling sockets"""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Shutting down\n")
+ logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
+ self.running = False
def get_sockets(self):
"""Returns sockets to select.select"""
@@ -303,29 +290,29 @@ class StatsHttpd:
def config_handler(self, new_config):
"""Config handler for the ModuleCCSession object. It resets
addresses and ports to listen HTTP requests on."""
- if self.verbose:
- self.write_log("[b10-stats-httpd] Loading config : %s\n" % str(new_config))
- for key in new_config.keys():
- if key not in DEFAULT_CONFIG:
- if self.verbose:
- self.write_log(
- "[b10-stats-httpd] Unknown known config: %s" % key)
+ logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
+ new_config)
+ errors = []
+ if not self.mccs.get_module_spec().\
+ validate_config(False, new_config, errors):
return isc.config.ccsession.create_answer(
- 1, "Unknown known config: %s" % key)
+ 1, ", ".join(errors))
# backup old config
old_config = self.config.copy()
- self.close_httpd()
self.load_config(new_config)
+ # If the http sockets aren't opened or
+ # if new_config doesn't have 'listen_on', it returns
+ if len(self.httpd) == 0 or 'listen_on' not in new_config:
+ return isc.config.ccsession.create_answer(0)
+ self.close_httpd()
try:
self.open_httpd()
except HttpServerError as err:
- if self.verbose:
- self.write_log("[b10-stats-httpd] %s\n" % err)
- self.write_log("[b10-stats-httpd] Restoring old config\n")
+ logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
- self.config_handler(old_config)
- return isc.config.ccsession.create_answer(
- 1, "[b10-stats-httpd] %s" % err)
+ self.load_config(old_config)
+ self.open_httpd()
+ return isc.config.ccsession.create_answer(1, str(err))
else:
return isc.config.ccsession.create_answer(0)
@@ -333,19 +320,18 @@ class StatsHttpd:
"""Command handler for the ModuleCCSesson object. It handles
"status" and "shutdown" commands."""
if command == "status":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'status' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_STATUS_COMMAND)
return isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")")
elif command == "shutdown":
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received 'shutdown' command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
- return isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down.")
+ return isc.config.ccsession.create_answer(0)
else:
- if self.verbose:
- self.write_log("[b10-stats-httpd] Received unknown command\n")
+ logger.debug(DBG_STATHTTPD_MESSAGING,
+ STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
@@ -354,8 +340,7 @@ class StatsHttpd:
the data which obtains from it"""
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'),
- self.stats_module_name)
+ isc.config.ccsession.create_command('show'), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -370,73 +355,34 @@ class StatsHttpd:
raise StatsHttpdError("Stats module: %s" % str(value))
def get_stats_spec(self):
- """Just returns spec data"""
- return self.stats_config_spec
-
- def load_templates(self):
- """Setup the bodies of XSD and XSL documents to be responds to
- HTTP clients. Before that it also creates XML tag structures by
- using xml.etree.ElementTree.Element class and substitutes
- concrete strings with parameters embed in the string.Template
- object."""
- # for XSD
- xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for item in self.get_stats_spec():
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- xsd_root.append(element)
- xsd_string = xml.etree.ElementTree.tostring(xsd_root)
- self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
- xsd_string=xsd_string,
- xsd_namespace=XSD_NAMESPACE
- )
- assert self.xsd_body is not None
-
- # for XSL
- xsd_root = xml.etree.ElementTree.Element(
- "xsl:template",
- dict(match="*")) # started with xml:template tag
- for item in self.get_stats_spec():
- tr = xml.etree.ElementTree.Element("tr")
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
- xsl_string = xml.etree.ElementTree.tostring(xsd_root)
- self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
- xsl_string=xsl_string,
- xsd_namespace=XSD_NAMESPACE)
- assert self.xsl_body is not None
+ """Requests the statistics data schema from the Stats daemon and
+ returns the data which it obtains"""
+ try:
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command('showschema'), 'Stats')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ return value
+ else:
+ raise StatsHttpdError("Stats module: %s" % str(value))
+ except (isc.cc.session.SessionTimeout,
+ isc.cc.session.SessionError) as err:
+ raise StatsHttpdError("%s: %s" %
+ (err.__class__.__name__, err))
def xml_handler(self):
"""Handler which requests to Stats daemon to obtain statistics
data and returns the body of XML document"""
xml_list=[]
- for (k, v) in self.get_stats_data().items():
- (k, v) = (str(k), str(v))
- elem = xml.etree.ElementTree.Element(k)
- elem.text = v
+ for (mod, spec) in self.get_stats_data().items():
+ if not spec: continue
+ elem1 = xml.etree.ElementTree.Element(str(mod))
+ for (k, v) in spec.items():
+ elem2 = xml.etree.ElementTree.Element(str(k))
+ elem2.text = str(v)
+ elem1.append(elem2)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -444,7 +390,7 @@ class StatsHttpd:
# bytes by specifying utf-8 and then convert the result to a
# plain string (code below assume it).
xml_list.append(
- str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
+ str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
encoding='us-ascii'))
xml_string = "".join(xml_list)
self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
@@ -457,18 +403,95 @@ class StatsHttpd:
def xsd_handler(self):
"""Handler which just returns the body of XSD document"""
+ # for XSD
+ xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ alltag = xml.etree.ElementTree.Element("all")
+ for item in spec:
+ element = xml.etree.ElementTree.Element(
+ "element",
+ dict( name=item["item_name"],
+ type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
+ minOccurs="1",
+ maxOccurs="1" ),
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ appinfo.text = item["item_title"]
+ documentation.text = item["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ element.append(annotation)
+ alltag.append(element)
+
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
+ mod_element.append(complextype)
+ xsd_root.append(mod_element)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
+ self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
+ xsd_string=xsd_string,
+ xsd_namespace=XSD_NAMESPACE
+ )
+ assert self.xsd_body is not None
return self.xsd_body
def xsl_handler(self):
"""Handler which just returns the body of XSL document"""
+ # for XSL
+ xsd_root = xml.etree.ElementTree.Element(
+ "xsl:template",
+ dict(match="*")) # started with xml:template tag
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ for item in spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td0 = xml.etree.ElementTree.Element("td")
+ td0.text = str(mod)
+ td1 = xml.etree.ElementTree.Element(
+ "td", { "class" : "title",
+ "title" : item["item_description"] })
+ td1.text = item["item_title"]
+ td2 = xml.etree.ElementTree.Element("td")
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ dict(select=mod+'/'+item["item_name"]))
+ td2.append(xsl_valueof)
+ tr.append(td0)
+ tr.append(td1)
+ tr.append(td2)
+ xsd_root.append(tr)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
+ encoding='us-ascii')
+ self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
+ xsl_string=xsl_string,
+ xsd_namespace=XSD_NAMESPACE)
+ assert self.xsl_body is not None
return self.xsl_body
def open_template(self, file_name):
"""It opens a template file, and it loads all lines to a
string variable and returns string. Template object includes
the variable. Limitation of a file size isn't needed there."""
- lines = "".join(
- open(file_name, 'r').readlines())
+ f = open(file_name, 'r')
+ lines = "".join(f.readlines())
+ f.close()
assert lines is not None
return string.Template(lines)
@@ -479,14 +502,18 @@ if __name__ == "__main__":
"-v", "--verbose", dest="verbose", action="store_true",
help="display more about what is going on")
(options, args) = parser.parse_args()
- stats_httpd = StatsHttpd(verbose=options.verbose)
+ if options.verbose:
+ isc.log.init("b10-stats-httpd", "DEBUG", 99)
+ stats_httpd = StatsHttpd()
stats_httpd.start()
- except OptionValueError:
- sys.exit("[b10-stats-httpd] Error parsing options")
+ except OptionValueError as ove:
+ logger.fatal(STATHTTPD_BAD_OPTION_VALUE, ove)
+ sys.exit(1)
except isc.cc.session.SessionError as se:
- sys.exit("[b10-stats-httpd] Error creating module, "
- + "is the command channel daemon running?")
+ logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
+ sys.exit(1)
except HttpServerError as hse:
- sys.exit("[b10-stats-httpd] %s" % hse)
+ logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
+ sys.exit(1)
except KeyboardInterrupt as kie:
- sys.exit("[b10-stats-httpd] Interrupted, exiting")
+ logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
new file mode 100644
index 0000000..0e984dc
--- /dev/null
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -0,0 +1,92 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_httpd_messages python module.
+
+% STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+
+% STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+
+% STATHTTPD_CLOSING_CC_SESSION stopping cc session
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+
+% STATHTTPD_CLOSING closing %1#%2
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+
+% STATHTTPD_HANDLE_CONFIG reading configuration: %1
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+
+% STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+
+% STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+
+% STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+
+% STATHTTPD_SERVER_ERROR HTTP server error: %1
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+
+% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+
+% STATHTTPD_SHUTDOWN shutting down
+The stats-httpd daemon is shutting down.
+
+% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+
+% STATHTTPD_STARTED listening on %1#%2
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+
+% STATHTTPD_STARTING_CC_SESSION starting cc session
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+
+% STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+
+% STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
new file mode 100644
index 0000000..cfffb3a
--- /dev/null
+++ b/src/bin/stats/stats_messages.mes
@@ -0,0 +1,76 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the stats_messages python module.
+
+% STATS_BAD_OPTION_VALUE bad command line argument: %1
+The stats module was called with a bad command-line argument and will
+not start.
+
+% STATS_CC_SESSION_ERROR error connecting to message bus: %1
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+
+% STATS_RECEIVED_NEW_CONFIG received new configuration: %1
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+
+% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
+The stats module received a command to show all statistics that it has
+collected.
+
+% STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1
+The stats module received a command to show the statistics that it has
+collected for the given item.
+
+% STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+A shutdown command was sent to the stats module and it will now shut down.
+
+% STATS_RECEIVED_STATUS_COMMAND received command to return status
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+
+% STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+
+% STATS_SEND_REQUEST_BOSS requesting boss to send statistics
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+
+% STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+
+% STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+
+% STATS_STARTING starting
+The stats module is now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
+
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index 8163c7f..b5edc59 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,20 +1,28 @@
-SUBDIRS = isc http testdata
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
B10_FROM_SOURCE=$(abs_top_srcdir) \
+ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 07999ea..e867080 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,166 +13,269 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats http server in a
+close to real environment.
+"""
+
import unittest
import os
-import http.server
-import string
-import fake_select
import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
+import isc
import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
DUMMY_DATA = {
- "auth.queries.tcp": 10000,
- "auth.queries.udp": 12000,
- "bind10.boot_time": "2011-03-04T11:59:05Z",
- "report_time": "2011-03-04T11:59:19Z",
- "stats.boot_time": "2011-03-04T11:59:06Z",
- "stats.last_update_time": "2011-03-04T11:59:07Z",
- "stats.lname": "4d70d40a_c at host",
- "stats.start_time": "2011-03-04T11:59:06Z",
- "stats.timestamp": 1299239959.560846
+ 'Boss' : {
+ "boot_time": "2011-03-04T11:59:06Z"
+ },
+ 'Auth' : {
+ "queries.tcp": 2,
+ "queries.udp": 3
+ },
+ 'Stats' : {
+ "report_time": "2011-03-04T11:59:19Z",
+ "boot_time": "2011-03-04T11:59:06Z",
+ "last_update_time": "2011-03-04T11:59:07Z",
+ "lname": "4d70d40a_c at host",
+ "timestamp": 1299239959.560846
+ }
}
-def push_answer(stats_httpd):
- stats_httpd.cc_session.group_sendmsg(
- { 'result':
- [ 0, DUMMY_DATA ] }, "Stats")
-
-def pull_query(stats_httpd):
- (msg, env) = stats_httpd.cc_session.group_recvmsg()
- if 'result' in msg:
- (ret, arg) = isc.config.ccsession.parse_answer(msg)
- else:
- (ret, arg) = isc.config.ccsession.parse_command(msg)
- return (ret, arg, env)
+def get_availaddr(address='127.0.0.1', port=8001):
+ """returns a tuple of an address and a port that is available to
+ listen on on the platform. The first argument is the address to
+ search with. The second argument is the port to start the search
+ from. If a given pair of address and port is not available, the
+ port number is incremented and the next pair is tried, until an
+ available pair is found. If the port number exceeds 65535, the
+ search stops and an OverflowError exception may be raised."""
+ while True:
+ for addr in socket.getaddrinfo(
+ address, port, 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP):
+ sock = socket.socket(addr[0], socket.SOCK_STREAM)
+ try:
+ sock.bind((address, port))
+ return (address, port)
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ # This address and port number are already in use.
+ # next port number is added
+ port = port + 1
+
+def is_ipv6_enabled(address='::1', port=8001):
+ """checks whether IPv6 is enabled on the platform. The address
+ used for the check is '::1' and the port is a random number
+ between 8001 and 65535. The check is retried up to 3 times on
+ failure. The built-in socket module provides a 'has_ipv6'
+ parameter, but it's not used here because that value may be True
+ even in an environment where the IPv6 config is disabled."""
+ for p in random.sample(range(port, 65535), 3):
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((address, p))
+ return True
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ return False
class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
-
def setUp(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.assertTrue(type(self.stats_httpd.httpd) is list)
- self.httpd = self.stats_httpd.httpd
- for ht in self.httpd:
- self.assertTrue(ht.verbose)
- self.stats_httpd.cc_session.verbose = False
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ (self.address, self.port) = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+ self.stats_httpd = self.stats_httpd_server.server
+ self.stats_httpd_server.run()
+ self.client = http.client.HTTPConnection(self.address, self.port)
+ self.client._http_vsn_str = 'HTTP/1.0\n'
+ self.client.connect()
- def test_do_GET(self):
- for ht in self.httpd:
- self._test_do_GET(ht._handler)
+ def tearDown(self):
+ self.client.close()
+ self.stats_httpd_server.shutdown()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
- def _test_do_GET(self, handler):
+ def test_do_GET(self):
+ self.assertTrue(type(self.stats_httpd.httpd) is list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
# URL is '/bind10/statistics/xml'
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- handler.do_GET()
- (ret, arg, env) = pull_query(self.stats_httpd)
- self.assertEqual(ret, "show")
- self.assertIsNone(arg)
- self.assertTrue('group' in env)
- self.assertEqual(env['group'], 'Stats')
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
- self.assertTrue(handler.response.body.find(str(v))>0)
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('stats_data') > 0)
+ for (k,v) in root.attrib.items():
+ if k.find('schemaLocation') > 0:
+ self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
+ for mod in DUMMY_DATA:
+ for (item, value) in DUMMY_DATA[mod].items():
+ self.assertIsNotNone(root.find(mod + '/' + item))
# URL is '/bind10/statitics/xsd'
- handler.path = stats_httpd.XSD_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
+ xsdpath = '/'.join(tags)
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ for elm in root.findall(xsdpath):
+ self.assertIsNotNone(elm.attrib['name'])
+ self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
# URL is '/bind10/statitics/xsl'
- handler.path = stats_httpd.XSL_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ xslpath = url_trans + 'template/' + url_xhtml + 'tr'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ for tr in root.findall(xslpath):
+ tds = tr.findall(url_xhtml + 'td')
+ self.assertIsNotNone(tds)
+ self.assertEqual(type(tds), list)
+ self.assertTrue(len(tds) > 2)
+ self.assertTrue(hasattr(tds[0], 'text'))
+ self.assertTrue(tds[0].text in DUMMY_DATA)
+ valueof = tds[2].find(url_trans + 'value-of')
+ self.assertIsNotNone(valueof)
+ self.assertTrue(hasattr(valueof, 'attrib'))
+ self.assertIsNotNone(valueof.attrib)
+ self.assertTrue('select' in valueof.attrib)
+ self.assertTrue(valueof.attrib['select'] in \
+ [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
# 302 redirect
- handler.path = '/'
- handler.headers = {'Host': 'my.host.domain'}
- handler.do_GET()
- self.assertEqual(handler.response.code, 302)
- self.assertEqual(handler.response.headers["Location"],
- "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
+ self.client._http_vsn_str = 'HTTP/1.1'
+ self.client.putrequest('GET', '/')
+ self.client.putheader('Host', self.address)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'),
+ "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
# 404 NotFound
- handler.path = '/path/to/foo/bar'
- handler.headers = {}
- handler.do_GET()
- self.assertEqual(handler.response.code, 404)
-
- # failure case(connection with Stats is down)
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = True
- handler.do_GET()
- self.stats_httpd.cc_session._socket._closed = False
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
-
- # failure case(Stats module returns err)
- handler.path = stats_httpd.XML_URL_PATH
- self.stats_httpd.cc_session.group_sendmsg(
- { 'result': [ 1, "I have an error." ] }, "Stats")
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = False
- handler.do_GET()
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+
+ def test_do_GET_failed1(self):
+ # checks status
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ # failure case(Stats is down)
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+ self.assertFalse(self.stats.running)
+ self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ def test_do_GET_failed2(self):
+ # failure case(Stats replies an error)
+ self.stats.mccs.set_command_handler(
+ lambda cmd, args: \
+ isc.config.ccsession.create_answer(1, "I have an error.")
+ )
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
def test_do_HEAD(self):
- for ht in self.httpd:
- self._test_do_HEAD(ht._handler)
-
- def _test_do_HEAD(self, handler):
- handler.path = '/path/to/foo/bar'
- handler.do_HEAD()
- self.assertEqual(handler.response.code, 404)
-
- def test_log_message(self):
- for ht in self.httpd:
- self._test_log_message(ht._handler)
-
- def _test_log_message(self, handler):
- # switch write_log function
- handler.server.log_writer = handler.response._write_log
- log_message = 'ABCDEFG'
- handler.log_message("%s", log_message)
- self.assertEqual(handler.response.log,
- "[b10-stats-httpd] %s - - [%s] %s\n" %
- (handler.address_string(),
- handler.log_date_time_string(),
- log_message))
+ self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
+
+ self.client.putrequest('HEAD', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
-
def test_raises(self):
try:
raise stats_httpd.HttpServerError('Nothing')
@@ -181,20 +284,24 @@ class TestHttpServerError(unittest.TestCase):
class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
+ def setUp(self):
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+
+ def tearDown(self):
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_httpserver(self):
- self.verbose = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.verbose, self.verbose)
- self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
- self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
- self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
- self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
- self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertEqual(type(self.stats_httpd.httpd), list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ for httpd in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
@@ -209,136 +316,173 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- self.verbose = True
- fake_socket._CLOSED = False
- fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats_server.run()
+ # checking IPv6 enabled on this platform
+ self.ipv6_enabled = is_ipv6_enabled()
def tearDown(self):
- self.stats_httpd.stop()
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_init(self):
- self.assertTrue(self.stats_httpd.verbose)
- self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
- self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
- id(self.stats_httpd.mccs.get_socket()))
- for ht in self.stats_httpd.httpd:
- self.assertFalse(ht.socket._closed)
- self.assertEqual(ht.socket.fileno(), id(ht.socket))
- fake_socket._CLOSED = True
- self.assertRaises(isc.cc.session.SessionError,
- stats_httpd.StatsHttpd)
- fake_socket._CLOSED = False
+ server_address = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_address)
+ self.assertEqual(self.stats_httpd.running, False)
+ self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+ self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+ self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+ self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+ self.assertEqual(len(self.stats_httpd.config), 2)
+ self.assertTrue('listen_on' in self.stats_httpd.config)
+ self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+ self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
+
+ def test_openclose_mccs(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.close_mccs()
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.stats_httpd.open_mccs()
+ self.assertIsNotNone(self.stats_httpd.mccs)
+ self.stats_httpd.mccs = None
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.assertEqual(self.stats_httpd.close_mccs(), None)
def test_mccs(self):
- self.stats_httpd.open_mccs()
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
self.assertTrue(
- isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+ isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
self.assertTrue(
isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
- self.assertTrue(
- isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
- for cfg in self.stats_httpd.stats_config_spec:
- self.assertTrue('item_name' in cfg)
- self.assertTrue(cfg['item_name'] in DUMMY_DATA)
- self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
- def test_load_config(self):
- self.stats_httpd.load_config()
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+ statistics_spec = self.stats_httpd.get_stats_spec()
+ for mod in DUMMY_DATA:
+ self.assertTrue(mod in statistics_spec)
+ for cfg in statistics_spec[mod]:
+ self.assertTrue('item_name' in cfg)
+ self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+ self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+ self.stats_httpd.close_mccs()
+ self.assertIsNone(self.stats_httpd.mccs)
def test_httpd(self):
# dual stack (addresses is ipv4 and ipv6)
- fake_socket.has_ipv6 = True
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
- self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
- self.assertTrue(
- stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ if self.ipv6_enabled:
+ server_addresses = (get_availaddr('::1'), get_availaddr())
+ self.stats_httpd = MyStatsHttpd(*server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+ self.assertTrue(isinstance(ht.socket, socket.socket))
# dual stack (address is ipv6)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # dual stack (address is ipv4)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr('::1')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # dual/single stack (address is ipv4)
+ server_addresses = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack (force set ipv6 )
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.assertRaises(stats_httpd.HttpServerError,
- self.stats_httpd.open_httpd)
-
- # hostname
- self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
- self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
- self.stats_httpd.open_httpd()
+ # any address (IPv4)
+ server_addresses = get_availaddr(address='0.0.0.0')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # any address (IPv6)
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr(address='::')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # existent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+ get_availaddr(address='localhost'))
+
+ # nonexistent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
# over flow of port number
- self.stats_httpd.http_addrs = [ ('', 80000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
# negative
- self.stats_httpd.http_addrs = [ ('', -8000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
- # alphabet
- self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
- def test_start(self):
- self.stats_httpd.cc_session.group_sendmsg(
- { 'command': [ "shutdown" ] }, "StatsHttpd")
- self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd(self.verbose)
- self.stats_httpd.cc_session.verbose = False
- self.assertRaises(
- fake_select.error, self.stats_httpd.start)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
- def test_stop(self):
- # success case
- fake_socket._CLOSED = False
- self.stats_httpd.stop()
+ # alphabet
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+
+ # Address already in use
+ server_addresses = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+ self.stats_httpd_server.run()
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+ send_shutdown("StatsHttpd")
+
+ def test_running(self):
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd = self.stats_httpd_server.server
self.assertFalse(self.stats_httpd.running)
- self.assertIsNone(self.stats_httpd.mccs)
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.socket._closed)
- self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+ self.stats_httpd_server.run()
+ self.assertEqual(send_command("status", "StatsHttpd"),
+ (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats_httpd.running)
+ self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+ self.assertFalse(self.stats_httpd.running)
+ self.stats_httpd_server.shutdown()
+
# failure case
- self.stats_httpd.cc_session._socket._closed = False
- self.stats_httpd.open_mccs()
- self.stats_httpd.cc_session._socket._closed = True
- self.stats_httpd.stop() # No excetion raises
- self.stats_httpd.cc_session._socket._closed = False
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.cc_session.close()
+ self.assertRaises(ValueError, self.stats_httpd.start)
+
+ def test_failure_with_a_select_error (self):
+ """checks select.error is raised if the exception except
+ errno.EINTR is raised while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error('dummy error')
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertRaises(select.error, self.stats_httpd.start)
+ stats_httpd.select.select = orig_select
+
+ def test_nofailure_with_errno_EINTR(self):
+ """checks no exception is raised if errno.EINTR is raised
+ while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error(errno.EINTR)
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd_server.run()
+ self.stats_httpd_server.shutdown()
+ stats_httpd.select.select = orig_select
def test_open_template(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
# successful conditions
tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
@@ -372,13 +516,13 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.open_template, '/path/to/foo/bar')
def test_commands(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(self.stats_httpd.command_handler("status", None),
isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
self.stats_httpd.running = True
self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down."))
+ isc.config.ccsession.create_answer(0))
self.assertFalse(self.stats_httpd.running)
self.assertEqual(
self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -386,48 +530,153 @@ class TestStatsHttpd(unittest.TestCase):
1, "Unknown command: __UNKNOWN_COMMAND__"))
def test_config(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(
self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
isc.config.ccsession.create_answer(
- 1, "Unknown known config: _UNKNOWN_KEY_"))
- self.assertEqual(
- self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::2",port=8000)])),
- isc.config.ccsession.create_answer(0))
- self.assertTrue("listen_on" in self.stats_httpd.config)
- for addr in self.stats_httpd.config["listen_on"]:
- self.assertTrue("address" in addr)
- self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::2")
- self.assertTrue(addr["port"] == 8000)
+ 1, "unknown item _UNKNOWN_KEY_"))
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::1",port=80)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::1")
- self.assertTrue(addr["port"] == 80)
-
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ if self.ipv6_enabled:
+ addresses = get_availaddr("::1")
+ self.assertEqual(
+ self.stats_httpd.config_handler(
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+ isc.config.ccsession.create_answer(0))
+ self.assertTrue("listen_on" in self.stats_httpd.config)
+ for addr in self.stats_httpd.config["listen_on"]:
+ self.assertTrue("address" in addr)
+ self.assertTrue("port" in addr)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "1.2.3.4")
- self.assertTrue(addr["port"] == 54321)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
(ret, arg) = isc.config.ccsession.parse_answer(
self.stats_httpd.config_handler(
dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
)
self.assertEqual(ret, 1)
+ def test_xml_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : { 'foo':'bar' } }
+ xml_body1 = self.stats_httpd.open_template(
+ stats_httpd.XML_TEMPLATE_LOCATION).substitute(
+ xml_string='<Dummy><foo>bar</foo></Dummy>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE,
+ xsd_url_path=stats_httpd.XSD_URL_PATH,
+ xsl_url_path=stats_httpd.XSL_URL_PATH)
+ xml_body2 = self.stats_httpd.xml_handler()
+ self.assertEqual(type(xml_body1), str)
+ self.assertEqual(type(xml_body2), str)
+ self.assertEqual(xml_body1, xml_body2)
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : {'bar':'foo'} }
+ xml_body2 = self.stats_httpd.xml_handler()
+ self.assertNotEqual(xml_body1, xml_body2)
+
+ def test_xsd_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsd_body1 = self.stats_httpd.open_template(
+ stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
+ xsd_string=\
+ '<all><element name="Dummy"><complexType><all>' \
+ + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ + '<annotation><appinfo>Foo</appinfo>' \
+ + '<documentation>foo is bar</documentation>' \
+ + '</annotation></element></all>' \
+ + '</complexType></element></all>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsd_body2 = self.stats_httpd.xsd_handler()
+ self.assertEqual(type(xsd_body1), str)
+ self.assertEqual(type(xsd_body2), str)
+ self.assertEqual(xsd_body1, xsd_body2)
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsd_body2 = self.stats_httpd.xsd_handler()
+ self.assertNotEqual(xsd_body1, xsd_body2)
+
+ def test_xsl_handler(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsl_body1 = self.stats_httpd.open_template(
+ stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
+ xsl_string='<xsl:template match="*"><tr>' \
+ + '<td>Dummy</td>' \
+ + '<td class="title" title="foo is bar">Foo</td>' \
+ + '<td><xsl:value-of select="Dummy/foo" /></td>' \
+ + '</tr></xsl:template>',
+ xsd_namespace=stats_httpd.XSD_NAMESPACE)
+ xsl_body2 = self.stats_httpd.xsl_handler()
+ self.assertEqual(type(xsl_body1), str)
+ self.assertEqual(type(xsl_body2), str)
+ self.assertEqual(xsl_body1, xsl_body2)
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsl_body2 = self.stats_httpd.xsl_handler()
+ self.assertNotEqual(xsl_body1, xsl_body2)
+
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
# variable
@@ -437,8 +686,6 @@ class TestStatsHttpd(unittest.TestCase):
imp.reload(stats_httpd)
os.environ["B10_FROM_SOURCE"] = tmppath
imp.reload(stats_httpd)
- stats_httpd.socket = fake_socket
- stats_httpd.select = fake_select
if __name__ == "__main__":
unittest.main()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index eccabdc..3813c7e 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,649 +13,593 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats module in a close
+to real environment.
+"""
+
+import unittest
import os
-import sys
+import threading
+import io
import time
-import unittest
import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-# setting Constant
-if sys.path[0] == '':
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
-else:
- TEST_SPECFILE_LOCATION = sys.path[0] + "/testdata/stats_test.spec"
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilties(unittest.TestCase):
+ items = [
+ { 'item_name': 'test_int1', 'item_type': 'integer', 'item_default': 12345 },
+ { 'item_name': 'test_real1', 'item_type': 'real', 'item_default': 12345.6789 },
+ { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True },
+ { 'item_name': 'test_str1', 'item_type': 'string', 'item_default': 'ABCD' },
+ { 'item_name': 'test_list1', 'item_type': 'list', 'item_default': [1,2,3],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map1', 'item_type': 'map', 'item_default': {'a':1,'b':2,'c':3},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'integer'},
+ { 'item_name': 'b', 'item_type': 'integer'},
+ { 'item_name': 'c', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_int2', 'item_type': 'integer' },
+ { 'item_name': 'test_real2', 'item_type': 'real' },
+ { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+ { 'item_name': 'test_str2', 'item_type': 'string' },
+ { 'item_name': 'test_list2', 'item_type': 'list',
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map2', 'item_type': 'map',
+ 'map_item_spec' : [ { 'item_name': 'A', 'item_type': 'integer'},
+ { 'item_name': 'B', 'item_type': 'integer'},
+ { 'item_name': 'C', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_none', 'item_type': 'none' },
+ { 'item_name': 'test_list3', 'item_type': 'list', 'item_default': ["one","two","three"],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+ { 'item_name': 'test_map3', 'item_type': 'map', 'item_default': {'a':'one','b':'two','c':'three'},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'string'},
+ { 'item_name': 'b', 'item_type': 'string'},
+ { 'item_name': 'c', 'item_type': 'string'} ] }
+ ]
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
- self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- self.stats_data = {
- 'report_time' : get_datetime(),
- 'bind10.boot_time' : "1970-01-01T00:00:00Z",
- 'stats.timestamp' : get_timestamp(),
- 'stats.lname' : self.session.lname,
- 'auth.queries.tcp': 0,
- 'auth.queries.udp': 0,
- "stats.boot_time": get_datetime(),
- "stats.start_time": get_datetime(),
- "stats.last_update_time": get_datetime()
- }
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
-
- def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
- self.session.close()
-
- def test_local_func(self):
- """
- Test for local function
-
- """
- # test for result_ok
- self.assertEqual(type(result_ok()), dict)
- self.assertEqual(result_ok(), {'result': [0]})
- self.assertEqual(result_ok(1), {'result': [1]})
- self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
- self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
- self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
- self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
- # test for get_timestamp
- self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
- # test for get_datetime
- self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
- def test_show_command(self):
- """
- Test for show command
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command with arg
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
- self.assertEqual(len(self.subject.session.message_queue), 1)
- self.subject.check()
- result_data = self.subject.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
- result_data)
- self.assertEqual(len(self.subject.session.message_queue), 0)
-
- # test show command with arg which has wrong name
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_set_command(self):
- """
- Test for set command
-
- """
- # test set command
- self.stats_data['auth.queries.udp'] = 54321
- self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.udp': 54321 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2
- self.stats_data['auth.queries.udp'] = 0
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 2
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 54322
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.tcp': 54322 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 3
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_remove_command(self):
- """
- Test for remove command
-
- """
- self.session.group_sendmsg({"command":
- [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
- self.assertFalse('bind10.boot_time' in self.stats_data)
-
- # test show command with arg
- self.session.group_sendmsg({"command":
- [ "show", {"stats_item_name": 'bind10.boot_time'}]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertFalse('bind10.boot_time' in result_data['result'][1])
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_reset_command(self):
- """
- Test for reset command
-
- """
- self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_status_command(self):
- """
- Test for status command
-
- """
- self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(0, "I'm alive."),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_unknown_command(self):
- """
- Test for unknown command
-
- """
- self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_shutdown_command(self):
- """
- Test for shutdown command
-
- """
- self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.assertTrue(self.subject.running)
- self.subject.check()
- self.assertFalse(self.subject.running)
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
+ self.const_timestamp = 1308730448.965706
+ self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ stats.time = lambda : self.const_timestamp
+ stats.gmtime = lambda : self.const_timetuple
- def test_some_commands(self):
- """
- Test for some commands in a row
-
- """
- # test set command
- self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
- self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'bind10.boot_time' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
- result_data)
- self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2nd
- self.stats_data['auth.queries.udp'] = 98765
- self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
- self.session.group_sendmsg({ "command": [
- "set", { 'stats_data': {
- 'auth.queries.udp':
- self.stats_data['auth.queries.udp']
- } }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({"command": [
- "show", {'stats_item_name': 'auth.queries.udp'}
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 4321
- self.session.group_sendmsg({"command": [
- "set",
- {'stats_data': {'auth.queries.tcp': 4321 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check value
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 4
- self.stats_data['auth.queries.tcp'] = 67890
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command for all values
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands2(self):
- """
- Test for some commands in a row using list-type value
-
- """
- self.stats_data['listtype'] = [1, 2, 3]
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({ "command": [
- "set", {'stats_data': {'listtype': [1, 2, 3] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype'}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
- result_data)
- self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands3(self):
- """
- Test for some commands in a row using dictionary-type value
-
- """
- self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
- result_data)
- self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_config_update(self):
- """
- Test for config update
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
-
- def test_for_boss(self):
- last_queue = self.session.old_message_queue.pop()
- self.assertEqual(
- last_queue.msg, {'command': ['sendstats']})
+ def test_get_spec_defaults(self):
self.assertEqual(
- last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+ stats.get_spec_defaults(self.items), {
+ 'test_int1' : 12345 ,
+ 'test_real1' : 12345.6789 ,
+ 'test_bool1' : True ,
+ 'test_str1' : 'ABCD' ,
+ 'test_list1' : [1,2,3] ,
+ 'test_map1' : {'a':1,'b':2,'c':3},
+ 'test_int2' : 0 ,
+ 'test_real2' : 0.0,
+ 'test_bool2' : False,
+ 'test_str2' : "",
+ 'test_list2' : [0],
+ 'test_map2' : { 'A' : 0, 'B' : 0, 'C' : 0 },
+ 'test_none' : None,
+ 'test_list3' : [ "one", "two", "three" ],
+ 'test_map3' : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+ self.assertEqual(stats.get_spec_defaults(None), {})
+ self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+ def test_get_timestamp(self):
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+ def test_get_datetime(self):
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertNotEqual(stats.get_datetime(
+ (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+ def setUp(self):
+ self.dummy_func = lambda *x, **y : (x, y)
+ self.dummy_args = (1,2,3)
+ self.dummy_kwargs = {'a':1,'b':2,'c':3}
+ self.cback1 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback2 = stats.Callback(
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback3 = stats.Callback(
+ command=self.dummy_func,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback4 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args
+ )
+
+ def test_init(self):
+ self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+ (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+ (None, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+ (self.dummy_func, (), self.dummy_kwargs))
+ self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+ (self.dummy_func, self.dummy_args, {}))
+
+ def test_call(self):
+ self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+ self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+ self.assertEqual(self.cback2(), None)
+ self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+ self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+ self.assertEqual(self.cback4(), (self.dummy_args, {}))
+ self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+ self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+class TestStats(unittest.TestCase):
def setUp(self):
- self.session = Session(verbose=True)
- self.subject = SessionSubject(session=self.session, verbose=True)
- self.listener = CCSessionListener(self.subject, verbose=True)
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats = stats.Stats()
+ self.const_timestamp = 1308730448.965706
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ self.const_default_datetime = '1970-01-01T00:00:00Z'
def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
+
+ def test_init(self):
+ self.assertEqual(self.stats.module_name, 'Stats')
+ self.assertFalse(self.stats.running)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_status' in self.stats.callbacks)
+ self.assertTrue('command_shutdown' in self.stats.callbacks)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_showschema' in self.stats.callbacks)
+ self.assertTrue('command_set' in self.stats.callbacks)
+
+ def test_init_undefcmd(self):
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "_undef_command_",
+ "command_description": "a undefined command in stats",
+ "command_args": []
+ }
+ ],
+ "statistics": []
+ }
+}
+"""
+ orig_spec_location = stats.SPECFILE_LOCATION
+ stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+ self.assertRaises(stats.StatsError, stats.Stats)
+ stats.SPECFILE_LOCATION = orig_spec_location
+
+ def test_start(self):
+ # start without err
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.assertFalse(self.stats.running)
+ self.stats_server.run()
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None))
+ self.assertFalse(self.stats.running)
+ self.stats_server.shutdown()
+
+ # start with err
+ self.stats = stats.Stats()
+ self.stats.update_statistics_data = lambda x,**y: ['an error']
+ self.assertRaises(stats.StatsError, self.stats.start)
+
+ def test_handlers(self):
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ # config_handler
+ self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+ isc.config.create_answer(0))
+
+ # command_handler
+ self.base.boss.server._started.wait()
+ self.base.boss.server._started.clear()
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command(
+ 'set', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'data' : { 'boot_time' : self.const_datetime } }),
+ (0, None))
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command('status', 'Stats'),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ (rcode, value) = send_command('show', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ self.assertTrue('boot_time' in value['Boss'])
+ self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertTrue('report_time' in value['Stats'])
+ self.assertTrue('boot_time' in value['Stats'])
+ self.assertTrue('last_update_time' in value['Stats'])
+ self.assertTrue('timestamp' in value['Stats'])
+ self.assertTrue('lname' in value['Stats'])
+ (rcode, value) = send_command('showschema', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ for item in value['Boss']:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+ for item in value['Stats']:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
- def test_specfile(self):
+ self.assertEqual(
+ send_command('__UNKNOWN__', 'Stats'),
+ (1, "Unknown command: '__UNKNOWN__'"))
+
+ self.stats_server.shutdown()
+
+ def test_update_modules(self):
+ self.assertEqual(len(self.stats.modules), 0)
+ self.stats.update_modules()
+ self.assertTrue('Stats' in self.stats.modules)
+ self.assertTrue('Boss' in self.stats.modules)
+ self.assertFalse('Dummy' in self.stats.modules)
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['timestamp'], 0.0)
+ self.assertEqual(my_statistics_data['lname'], "")
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ orig_parse_answer = stats.isc.config.ccsession.parse_answer
+ stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+ self.assertRaises(stats.StatsError, self.stats.update_modules)
+ stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+ def test_get_statistics_data(self):
+ my_statistics_data = self.stats.get_statistics_data()
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('Boss' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('boot_time' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+ self.assertEqual(my_statistics_data, 0.0)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+ self.assertEqual(my_statistics_data, '')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Stats', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Foo', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ name='Bar')
+
+ def test_update_statistics_data(self):
+ self.stats.update_statistics_data(owner='Stats', lname='foo at bar')
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['lname'], 'foo at bar')
+ self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+ self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+ ['0.0 should be a string'])
+ self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+ ['unknown module name: Dummy'])
+
+ def test_commands(self):
+ # status
+ self.assertEqual(self.stats.command_status(),
+ isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ # shutdown
+ self.stats.running = True
+ self.assertEqual(self.stats.command_shutdown(),
+ isc.config.create_answer(0))
+ self.assertFalse(self.stats.running)
+
+ def test_command_show(self):
+ self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_show(owner='Auth'),
+ isc.config.create_answer(
+ 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+ isc.config.create_answer(
+ 0, 0))
+ orig_get_timestamp = stats.get_timestamp
+ orig_get_datetime = stats.get_datetime
+ stats.get_timestamp = lambda : self.const_timestamp
+ stats.get_datetime = lambda : self.const_datetime
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+ isc.config.create_answer(0, self.const_datetime))
+ self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+ self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+ stats.get_timestamp = orig_get_timestamp
+ stats.get_datetime = orig_get_datetime
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertRaises(
+ stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+ def test_command_showchema(self):
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema())
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Auth' in value)
+ self.assertFalse('__Dummy__' in value)
+ schema = value['Stats']
+ self.assertEqual(len(schema), 5)
+ for item in schema:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ schema = value['Boss']
+ self.assertEqual(len(schema), 1)
+ for item in schema:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+
+ schema = value['Auth']
+ self.assertEqual(len(schema), 2)
+ for item in schema:
+ self.assertTrue(len(item) == 6)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ for item in value:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats', name='report_time'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ self.assertTrue(len(value) == 7)
+ self.assertTrue('item_name' in value)
+ self.assertTrue('item_type' in value)
+ self.assertTrue('item_optional' in value)
+ self.assertTrue('item_default' in value)
+ self.assertTrue('item_title' in value)
+ self.assertTrue('item_description' in value)
+ self.assertTrue('item_format' in value)
+ self.assertEqual(value['item_name'], 'report_time')
+ self.assertEqual(value['item_format'], 'date-time')
+
+ self.assertEqual(self.stats.command_showschema(owner='Foo'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_showschema(owner='Auth'),
+ isc.config.create_answer(
+ 0, [{
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ },
+ {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+ "item_name": "queries.udp",
+ "item_optional": False,
+ "item_title": "Queries UDP",
+ "item_type": "integer"
+ }]))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+ isc.config.create_answer(
+ 0, {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ }))
+
+ self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Stats, name: bar"))
+ self.assertEqual(self.stats.command_showschema(name='bar'),
+ isc.config.create_answer(
+ 1, "module name is not specified"))
+
+ def test_command_set(self):
+ orig_get_datetime = stats.get_datetime
+ stats.get_datetime = lambda : self.const_datetime
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_set(owner='Boss',
+ data={ 'boot_time' : self.const_datetime }))
+ stats.get_datetime = orig_get_datetime
+ self.assertEqual(rcode, 0)
+ self.assertTrue(value is None)
+ self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : 'foo at bar' }),
+ isc.config.create_answer(0, None))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_ at _bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: unknown item lname"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_ at _bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: No statistics specification"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [
+ {
+ "item_name": "dummy",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "brabra"
+ } ] } )
+ self.assertRaises(stats.StatsError,
+ self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+ def test_osenv(self):
"""
- Test for specfile
-
+ test for the environ variable "B10_FROM_SOURCE"
+ "B10_FROM_SOURCE" is set in Makefile
"""
- if "B10_FROM_SOURCE" in os.environ:
- self.assertEqual(stats.SPECFILE_LOCATION,
+ # test case having B10_FROM_SOURCE
+ self.assertTrue("B10_FROM_SOURCE" in os.environ)
+ self.assertEqual(stats.SPECFILE_LOCATION, \
os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats.spec")
- self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
- os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats-schema.spec")
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats.spec")
+ # test case not having B10_FROM_SOURCE
+ path = os.environ["B10_FROM_SOURCE"]
+ os.environ.pop("B10_FROM_SOURCE")
+ self.assertFalse("B10_FROM_SOURCE" in os.environ)
+ # import stats again
+ imp.reload(stats)
+ # revert the changes
+ os.environ["B10_FROM_SOURCE"] = path
imp.reload(stats)
- # change path of SPECFILE_LOCATION
- stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session, verbose=True)
- self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject, verbose=True)
-
- self.assertEqual(self.listener.stats_spec, [])
- self.assertEqual(self.listener.stats_data, {})
-
- self.assertEqual(self.listener.commands_spec, [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }])
-
- def test_func_initialize_data(self):
- """
- Test for initialize_data function
-
- """
- # prepare for sample data set
- stats_spec = [
- {
- "item_name": "none_sample",
- "item_type": "null",
- "item_default": "None"
- },
- {
- "item_name": "boolean_sample",
- "item_type": "boolean",
- "item_default": True
- },
- {
- "item_name": "string_sample",
- "item_type": "string",
- "item_default": "A something"
- },
- {
- "item_name": "int_sample",
- "item_type": "integer",
- "item_default": 9999999
- },
- {
- "item_name": "real_sample",
- "item_type": "real",
- "item_default": 0.0009
- },
- {
- "item_name": "list_sample",
- "item_type": "list",
- "item_default": [0, 1, 2, 3, 4],
- "list_item_spec": []
- },
- {
- "item_name": "map_sample",
- "item_type": "map",
- "item_default": {'name':'value'},
- "map_item_spec": []
- },
- {
- "item_name": "other_sample",
- "item_type": "__unknown__",
- "item_default": "__unknown__"
- }
- ]
- # data for comparison
- stats_data = {
- 'none_sample': None,
- 'boolean_sample': True,
- 'string_sample': 'A something',
- 'int_sample': 9999999,
- 'real_sample': 0.0009,
- 'list_sample': [0, 1, 2, 3, 4],
- 'map_sample': {'name':'value'},
- 'other_sample': '__unknown__'
- }
- self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
- def test_func_main(self):
- # explicitly make failed
- self.session.close()
- stats.main(session=self.session)
- def test_osenv(self):
- """
- test for not having environ "B10_FROM_SOURCE"
- """
- if "B10_FROM_SOURCE" in os.environ:
- path = os.environ["B10_FROM_SOURCE"]
- os.environ.pop("B10_FROM_SOURCE")
- imp.reload(stats)
- os.environ["B10_FROM_SOURCE"] = path
- imp.reload(stats)
-
-def result_ok(*args):
- if args:
- return { 'result': list(args) }
- else:
- return { 'result': [ 0 ] }
+def test_main():
+ unittest.main()
if __name__ == "__main__":
- unittest.main()
+ test_main()
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
- pass
-
-def select(rlst, wlst, xlst, timeout):
- if type(timeout) != int and type(timeout) != float:
- raise TypeError("Error: %s must be integer or float"
- % timeout.__class__.__name__)
- for s in rlst + wlst + xlst:
- if type(s) != fake_socket.socket:
- raise TypeError("Error: %s must be a dummy socket"
- % s.__class__.__name__)
- s._called = s._called + 1
- if s._called > 3:
- raise error("Something is happened!")
- elif s._called > 2:
- raise error(errno.EINTR)
- return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
- pass
-
-class error(Exception):
- pass
-
-class socket:
-
- def __init__(self, family=None):
- if family is None:
- self.address_family = _ADDRFAMILY
- else:
- self.address_family = family
- self._closed = _CLOSED
- if self._closed:
- raise error('socket is already closed!')
- self._called = 0
-
- def close(self):
- self._closed = True
-
- def fileno(self):
- return id(self)
-
- def bind(self, server_class):
- (self.server_address, self.server_port) = server_class
- if self.address_family not in set([AF_INET, AF_INET6]):
- raise error("Address family not supported by protocol: %s" % self.address_family)
- if self.address_family == AF_INET6 and not has_ipv6:
- raise error("Address family not supported in this machine: %s has_ipv6: %s"
- % (self.address_family, str(has_ipv6)))
- if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
- raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
- if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
- raise error("Cannot assign requested address : %s" % str(self.server_address))
- if type(self.server_port) is not int:
- raise TypeError("an integer is required: %s" % str(self.server_port))
- if self.server_port < 0 or self.server_port > 65535:
- raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
- """
- This is a dummy time() method against time.time()
- """
- # return float constant value
- return _TEST_TIME_SECS
-
-def gmtime():
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- # always return nothing
- return None
-
-def strftime(*arg):
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 79263a9..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
- def __init__(self, path):
- self.path = path
- self.headers={}
- self.log = ""
-
- def _write_log(self, msg):
- self.log = self.log + msg
-
-class HTTPServer:
- """
- A mock-up class of http.server.HTTPServer
- """
- address_family = fake_socket.AF_INET
- def __init__(self, server_class, handler_class):
- self.socket = fake_socket.socket(self.address_family)
- self.server_class = server_class
- self.socket.bind(self.server_class)
- self._handler = handler_class(None, None, self)
-
- def handle_request(self):
- pass
-
- def server_close(self):
- self.socket.close()
-
-class BaseHTTPRequestHandler:
- """
- A mock-up class of http.server.BaseHTTPRequestHandler
- """
-
- def __init__(self, request, client_address, server):
- self.path = "/path/to"
- self.headers = {}
- self.server = server
- self.response = DummyHttpResponse(path=self.path)
- self.response.write = self._write
- self.wfile = self.response
-
- def send_response(self, code=0):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
-
- def send_header(self, key, value):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.headers[key] = value
-
- def end_headers(self):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.wrote_headers = True
-
- def send_error(self, code, message=None):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
- self.response.body = message
-
- def address_string(self):
- return 'dummyhost'
-
- def log_date_time_string(self):
- return '[DD/MM/YYYY HH:MI:SS]'
-
- def _write(self, obj):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index bfad7e3..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = cc config util
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index 67323b5..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e16d6a9..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc at xxxx'
-
-class Queue():
- def __init__(self, msg=None, env={}):
- self.msg = msg
- self.env = env
-
- def dump(self):
- return { 'msg': self.msg, 'env': self.env }
-
-class SessionError(Exception):
- pass
-
-class SessionTimeout(Exception):
- pass
-
-class Session:
- def __init__(self, socket_file=None, verbose=False):
- self._lname = _TEST_LNAME
- self.message_queue = []
- self.old_message_queue = []
- try:
- self._socket = fake_socket.socket()
- except fake_socket.error as se:
- raise SessionError(se)
- self.verbose = verbose
-
- @property
- def lname(self):
- return self._lname
-
- def close(self):
- self._socket.close()
-
- def _clear_queues(self):
- while len(self.message_queue) > 0:
- self.dequeue()
-
- def _next_sequence(self, que=None):
- return len(self.message_queue)
-
- def enqueue(self, msg=None, env={}):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- seq = self._next_sequence()
- env.update({"seq": 0}) # fixed here
- que = Queue(msg=msg, env=env)
- self.message_queue.append(que)
- if self.verbose:
- sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
- return seq
-
- def dequeue(self):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = None
- try:
- que = self.message_queue.pop(0) # always pop at index 0
- self.old_message_queue.append(que)
- except IndexError:
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
- return que
-
- def get_queue(self, seq=None):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- if seq is None:
- seq = len(self.message_queue) - 1
- que = None
- try:
- que = self.message_queue[seq]
- except IndexError:
- raise IndexError
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
- return que
-
- def group_sendmsg(self, msg, group, instance="*", to="*"):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": to,
- "group": group,
- "instance": instance })
-
- def group_recvmsg(self, nonblock=True, seq=0):
- que = self.dequeue()
- return que.msg, que.env
-
- def group_reply(self, routing, msg):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": routing["from"],
- "group": routing["group"],
- "instance": routing["instance"],
- "reply": routing["seq"] })
-
- def get_message(self, group, to='*'):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = Queue()
- for q in self.message_queue:
- if q.env['group'] == group:
- self.message_queue.remove(q)
- self.old_message_queue.append(q)
- que = q
- if self.verbose:
- sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
- return q.msg
-
- def group_subscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
-
- def group_unsubscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index ffbecda..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index a4e9c37..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
- assert 'result' in msg
- try:
- return msg['result'][0], msg['result'][1]
- except IndexError:
- return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
- if arg is None:
- return { 'result': [ rcode ] }
- else:
- return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
- assert 'command' in msg
- try:
- return msg['command'][0], msg['command'][1]
- except IndexError:
- return msg['command'][0], None
-
-def create_command(command_name, params = None):
- if params is None:
- return {"command": [command_name]}
- else:
- return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
- try:
- file = open(spec_file)
- json_str = file.read()
- module_spec = json.loads(json_str)
- file.close()
- return ModuleSpec(module_spec['module_spec'], check)
- except IOError as ioe:
- raise ModuleSpecError("JSON read error: " + str(ioe))
- except ValueError as ve:
- raise ModuleSpecError("JSON parse error: " + str(ve))
- except KeyError as err:
- raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
- pass
-
-class ModuleSpec:
- def __init__(self, module_spec, check = True):
- self._module_spec = module_spec
-
- def get_config_spec(self):
- return self._module_spec['config_data']
-
- def get_commands_spec(self):
- return self._module_spec['commands']
-
- def get_module_name(self):
- return self._module_spec['module_name']
-
-class ModuleCCSessionError(Exception):
- pass
-
-class DataNotFoundError(Exception):
- pass
-
-class ConfigData:
- def __init__(self, specification):
- self.specification = specification
-
- def get_value(self, identifier):
- """Returns a tuple where the first item is the value at the
- given identifier, and the second item is absolutely False
- even if the value is an unset default or not. Raises an
- DataNotFoundError if the identifier is not found in the
- specification file.
- *** NOTE ***
- There are some differences from the original method. This
- method never handles local settings like the original
- method. But these different behaviors aren't so big issues
- for a mock-up method of stats_httpd because stats_httpd
- calls this method at only first."""
- for config_map in self.get_module_spec().get_config_spec():
- if config_map['item_name'] == identifier:
- if 'item_default' in config_map:
- return config_map['item_default'], False
- raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
- def get_module_spec(self):
- return self.specification
-
-class ModuleCCSession(ConfigData):
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
- module_spec = module_spec_from_file(spec_file_name)
- ConfigData.__init__(self, module_spec)
- self._module_name = module_spec.get_module_name()
- self.set_config_handler(config_handler)
- self.set_command_handler(command_handler)
- if not cc_session:
- self._session = Session(verbose=True)
- else:
- self._session = cc_session
-
- def start(self):
- pass
-
- def close(self):
- self._session.close()
-
- def check_command(self, nonblock=True):
- msg, env = self._session.group_recvmsg(nonblock)
- if not msg or 'result' in msg:
- return
- cmd, arg = parse_command(msg)
- answer = None
- if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
- answer = self._config_handler(arg)
- elif env['group'] == self._module_name and self._command_handler:
- answer = self._command_handler(cmd, arg)
- if answer:
- self._session.group_reply(env, answer)
-
- def set_config_handler(self, config_handler):
- self._config_handler = config_handler
- # should we run this right now since we've changed the handler?
-
- def set_command_handler(self, command_handler):
- self._command_handler = command_handler
-
- def get_module_spec(self):
- return self.specification
-
- def get_socket(self):
- return self._session._socket
-
diff --git a/src/bin/stats/tests/isc/log_messages/Makefile.am b/src/bin/stats/tests/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..90b4499
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST = __init__.py stats_messages.py stats_httpd_messages.py
+CLEANFILES = __init__.pyc stats_messages.pyc stats_httpd_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log_messages/__init__.py b/src/bin/stats/tests/isc/log_messages/__init__.py
new file mode 100644
index 0000000..58e99e3
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''
+This is a fake package that acts as a forwarder to the real package.
+'''
diff --git a/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..0adb0f0
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_httpd_messages import *
diff --git a/src/bin/stats/tests/isc/log_messages/stats_messages.py b/src/bin/stats/tests/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..c05a6a8
--- /dev/null
+++ b/src/bin/stats/tests/isc/log_messages/stats_messages.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from work.stats_messages import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index 9c74354..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
- pass
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..da0bac4
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,364 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+# Change value of BIND10_MSGQ_SOCKET_FILE in environment variables
+if 'BIND10_MSGQ_SOCKET_FILE' not in os.environ:
+ os.environ['BIND10_MSGQ_SOCKET_FILE'] = tempfile.mktemp(prefix='msgq_socket_')
+
+class SignalHandler():
+ """A signal handler class for deadlock in unittest"""
+ def __init__(self, fail_handler, timeout=20):
+        """sets a schedule in SIGALRM for invoking the handler via
+ unittest.TestCase after timeout seconds (default is 20)"""
+ self.fail_handler = fail_handler
+ self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+ signal.alarm(timeout)
+
+ def reset(self):
+ """resets the schedule in SIGALRM"""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.orig_handler)
+
+ def sig_handler(self, signal, frame):
+        """invokes unittest.TestCase.fail as a signal handler"""
+ self.fail_handler("A deadlock might be detected")
+
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+ if session is not None:
+ cc_session = session
+ else:
+ cc_session = isc.cc.Session()
+ if timeout is not None:
+ orig_timeout = cc_session.get_timeout()
+ cc_session.set_timeout(timeout * 1000)
+ command = isc.config.ccsession.create_command(command_name, params)
+ seq = cc_session.group_sendmsg(command, module_name)
+ try:
+ (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+ if answer:
+ return isc.config.ccsession.parse_answer(answer)
+ except isc.cc.SessionTimeout:
+ pass
+ finally:
+ if timeout is not None:
+ cc_session.set_timeout(orig_timeout)
+ if session is None:
+ cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+ return send_command("shutdown", module_name, **kwargs)
+
+class ThreadingServerManager:
+ def __init__(self, server, *args, **kwargs):
+ self.server = server(*args, **kwargs)
+ self.server_name = server.__name__
+ self.server._thread = threading.Thread(
+ name=self.server_name, target=self.server.run)
+ self.server._thread.daemon = True
+
+ def run(self):
+ self.server._thread.start()
+ self.server._started.wait()
+ self.server._started.clear()
+
+ def shutdown(self):
+ self.server.shutdown()
+ self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+ """Dummy for sys"""
+ class dummy_io:
+ write = do_nothing
+ stdout = stderr = dummy_io()
+
+class MockMsgq:
+ def __init__(self):
+ self._started = threading.Event()
+ # suppress output to stdout and stderr
+ msgq.sys = dummy_sys()
+ msgq.print = do_nothing
+ self.msgq = msgq.MsgQ(verbose=False)
+ result = self.msgq.setup()
+ if result:
+ sys.exit("Error on Msgq startup: %s" % result)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.msgq.run()
+ except Exception:
+ pass
+ finally:
+ # explicitly shut down the socket of the msgq before
+ # shutting down the msgq
+ self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+ self.msgq.shutdown()
+
+ def shutdown(self):
+ # do nothing for avoiding shutting down the msgq twice
+ pass
+
+class MockCfgmgr:
+ def __init__(self):
+ self._started = threading.Event()
+ self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+ os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+ self.cfgmgr.read_config()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.cfgmgr.run()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.cfgmgr.running = False
+
+class MockBoss:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Boss",
+ "module_description": "Mock Master process",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+"""
+ _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self._started.set()
+ self.got_command_name = command
+ params = { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+ }
+ }
+ if command == 'sendstats':
+ send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(0)
+ elif command == 'getstats':
+ return isc.config.create_answer(0, params)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MockAuth:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Auth",
+ "module_description": "Mock Authoritative service",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ]
+ }
+}
+"""
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+ self.queries_tcp = 3
+ self.queries_udp = 2
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self.got_command_name = command
+ if command == 'sendstats':
+ params = { "owner": "Auth",
+ "data": { 'queries.tcp': self.queries_tcp,
+ 'queries.udp': self.queries_udp } }
+ return send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MyStats(stats.Stats):
+ def __init__(self):
+ self._started = threading.Event()
+ stats.Stats.__init__(self)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+ ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+ def __init__(self, *server_address):
+ self._started = threading.Event()
+ if server_address:
+ stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+ try:
+ stats_httpd.StatsHttpd.__init__(self)
+ finally:
+ if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+ stats_httpd.SPECFILE_LOCATION.close()
+ stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+ else:
+ stats_httpd.StatsHttpd.__init__(self)
+
+ def create_specfile(self, *server_address):
+ spec_io = open(self.ORIG_SPECFILE_LOCATION)
+ try:
+ spec = json.load(spec_io)
+ spec_io.close()
+ config = spec['module_spec']['config_data']
+ for i in range(len(config)):
+ if config[i]['item_name'] == 'listen_on':
+ config[i]['item_default'] = \
+ [ dict(address=a[0], port=a[1]) for a in server_address ]
+ break
+ return io.StringIO(json.dumps(spec))
+ finally:
+ spec_io.close()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_handler('shutdown', None)
+
+class BaseModules:
+ def __init__(self):
+ # MockMsgq
+ self.msgq = ThreadingServerManager(MockMsgq)
+ self.msgq.run()
+ # Check whether msgq is ready. A SessionTimeout is raised here if not.
+ isc.cc.session.Session().close()
+ # MockCfgmgr
+ self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+ self.cfgmgr.run()
+ # MockBoss
+ self.boss = ThreadingServerManager(MockBoss)
+ self.boss.run()
+ # MockAuth
+ self.auth = ThreadingServerManager(MockAuth)
+ self.auth.run()
+
+ def shutdown(self):
+ # MockAuth
+ self.auth.shutdown()
+ # MockBoss
+ self.boss.shutdown()
+ # MockCfgmgr
+ self.cfgmgr.shutdown()
+ # MockMsgq
+ self.msgq.shutdown()
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index b5bcea2..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -1,5 +1,6 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = process_rename_test.py
+noinst_SCRIPTS = $(PYTESTS)
# .py will be generated by configure, so we don't have to include it
# in EXTRA_DIST.
@@ -7,19 +8,20 @@ PYTESTS = process_rename_test.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 4b45210..f96c023 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -38,8 +38,10 @@ class TestRename(unittest.TestCase):
Then scan them by looking at the source text
(without actually running them)
"""
- # Regexp to find all the *_SCRIPTS = something lines,
- # including line continuations (backslash and newline)
+ # Regexp to find all the *_SCRIPTS = something lines (except for
+ # noinst_SCRIPTS, which are scripts for tests), including line
+ # continuations (backslash and newline)
+ excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$',
re.MULTILINE)
# Script name regular expression
@@ -53,7 +55,8 @@ class TestRename(unittest.TestCase):
if 'Makefile' in fs:
makefile = ''.join(open(os.path.join(d,
"Makefile")).readlines())
- for (var, _) in lines.findall(makefile):
+ for (var, _) in lines.findall(re.sub(excluded_lines, '',
+ makefile)):
for (script, _) in scripts.findall(var):
self.__scan(d, script, fun)
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 0af9be6..8d80b22 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
b10_xfrindir = $(pkgdatadir)
b10_xfrin_DATA = xfrin.spec
-pyexec_DATA = xfrin_messages.py
-CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.pyc
man_MANS = b10-xfrin.8
EXTRA_DIST = $(man_MANS) b10-xfrin.xml
@@ -22,11 +26,12 @@ b10-xfrin.8: b10-xfrin.xml
endif
# Define rule to build logging source files from message file
-xfrin_messages.py: xfrin_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py : xfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrin_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py xfrin_messages.py
+b10-xfrin: xfrin.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
chmod a+x $@
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index 3ea2293..54dbe7c 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
.\" Title: b10-xfrin
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: May 19, 2011
+.\" Date: September 8, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-XFRIN" "8" "May 19, 2011" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "September 8, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -61,7 +61,7 @@ receives its configurations from
.PP
The configurable settings are:
.PP
-\fItransfers\-in\fR
+\fItransfers_in\fR
defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
.PP
@@ -71,6 +71,9 @@ is a list of zones known to the
daemon\&. The list items are:
\fIname\fR
(the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
\fImaster_addr\fR
(the zone master to transfer from),
\fImaster_port\fR
@@ -125,7 +128,7 @@ to define the class (defaults to
\fImaster\fR
to define the IP address of the authoritative server to transfer from, and
\fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the values previously defined in the
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
\fIzones\fR
configuration\&.
.PP
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index ea4c724..d45e15f 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>May 19, 2011</date>
+ <date>September 8, 2011</date>
</refentryinfo>
<refmeta>
@@ -92,7 +92,7 @@ in separate zonemgr process.
The configurable settings are:
</para>
- <para><varname>transfers-in</varname>
+ <para><varname>transfers_in</varname>
defines the maximum number of inbound zone transfers
that can run concurrently. The default is 10.
</para>
@@ -103,6 +103,7 @@ in separate zonemgr process.
<command>b10-xfrin</command> daemon.
The list items are:
<varname>name</varname> (the zone name),
+ <varname>class</varname> (defaults to <quote>IN</quote>),
<varname>master_addr</varname> (the zone master to transfer from),
<varname>master_port</varname> (defaults to 53), and
<varname>tsig_key</varname> (optional TSIG key to use).
@@ -168,7 +169,7 @@ in separate zonemgr process.
and <varname>port</varname> to define the port number on the
authoritative server (defaults to 53).
If the address or port is not specified, it will use the
- values previously defined in the <varname>zones</varname>
+ value previously defined in the <varname>zones</varname>
configuration.
</para>
<!-- TODO: later hostname for master? -->
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 0f485aa..3d56009 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 2acd9d6..05cce98 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -18,6 +18,7 @@ import socket
import io
from isc.testutils.tsigctx_mock import MockTSIGContext
from xfrin import *
+import isc.log
#
# Commonly used (mostly constant) test parameters
@@ -954,13 +955,20 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
else:
self.assertIsNone(zone_info.tsig_key)
+ if 'ixfr_disabled' in zone_config and\
+ zone_config.get('ixfr_disabled'):
+ self.assertTrue(zone_info.ixfr_disabled)
+ else:
+ # if not set, should default to False
+ self.assertFalse(zone_info.ixfr_disabled)
def test_command_handler_zones(self):
config1 = { 'transfers_in': 3,
'zones': [
{ 'name': 'test.example.',
'master_addr': '192.0.2.1',
- 'master_port': 53
+ 'master_port': 53,
+ 'ixfr_disabled': False
}
]}
self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
@@ -971,7 +979,8 @@ class TestXfrin(unittest.TestCase):
{ 'name': 'test.example.',
'master_addr': '192.0.2.2',
'master_port': 53,
- 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+ 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+ 'ixfr_disabled': True
}
]}
self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
@@ -1115,6 +1124,7 @@ class TestMain(unittest.TestCase):
if __name__== "__main__":
try:
+ isc.log.resetUnitTestRootLogger()
unittest.main()
except KeyboardInterrupt as e:
print(e)
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 64e3563..a77a383 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -29,7 +29,7 @@ from isc.config.ccsession import *
from isc.notify import notify_out
import isc.util.process
import isc.net.parse
-from xfrin_messages import *
+from isc.log_messages.xfrin_messages import *
isc.log.init("b10-xfrin")
logger = isc.log.Logger("xfrin")
@@ -152,7 +152,7 @@ class XfrinConnection(asyncore.dispatcher):
self.connect(self._master_address)
return True
except socket.error as e:
- logger.error(CONNECT_MASTER, self._master_address, str(e))
+ logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
return False
def _create_query(self, query_type):
@@ -451,6 +451,7 @@ class ZoneInfo:
self.set_master_port(config_data.get('master_port'))
self.set_zone_class(config_data.get('class'))
self.set_tsig_key(config_data.get('tsig_key'))
+ self.set_ixfr_disabled(config_data.get('ixfr_disabled'))
def set_name(self, name_str):
"""Set the name for this zone given a name string.
@@ -525,6 +526,16 @@ class ZoneInfo:
errmsg = "bad TSIG key string: " + tsig_key_str
raise XfrinZoneInfoException(errmsg)
+ def set_ixfr_disabled(self, ixfr_disabled):
+ """Set ixfr_disabled. If set to False (the default), it will use
+ IXFR for incoming transfers. If set to True, it will use AXFR.
+ At this moment there is no automatic fallback"""
+ # don't care what type it is; if evaluates to true, set to True
+ if ixfr_disabled:
+ self.ixfr_disabled = True
+ else:
+ self.ixfr_disabled = False
+
def get_master_addr_info(self):
return (self.master_addr.family, socket.SOCK_STREAM,
(str(self.master_addr), self.master_port))
@@ -548,8 +559,7 @@ class Xfrin:
self._send_cc_session = isc.cc.Session()
self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
self.config_handler,
- self.command_handler,
- None, True)
+ self.command_handler)
self._module_cc.start()
config_data = self._module_cc.get_full_config()
self.config_handler(config_data)
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index a3e62ce..bc93720 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -43,6 +43,11 @@
{ "item_name": "tsig_key",
"item_type": "string",
"item_optional": true
+ },
+ { "item_name": "ixfr_disabled",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
}
]
}
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index c5492ad..6100e64 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrout
b10_xfroutdir = $(pkgdatadir)
b10_xfrout_DATA = xfrout.spec
-pyexec_DATA = xfrout_messages.py
-CLEANFILES= b10-xfrout xfrout.pyc xfrout.spec xfrout_messages.py xfrout_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.pyc
man_MANS = b10-xfrout.8
EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
@@ -21,14 +25,15 @@ b10-xfrout.8: b10-xfrout.xml
endif
# Define rule to build logging source files from message file
-xfrout_messages.py: xfrout_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrout/xfrout_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py : xfrout_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrout_messages.mes
xfrout.spec: xfrout.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py xfrout_messages.py
+b10-xfrout: xfrout.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
chmod a+x $@
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..9889b80 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -134,6 +134,14 @@
data storage types.
</simpara></note>
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
<!-- TODO: formating -->
<para>
The configuration commands are:
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 6ca2b42..ace8fc9 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -1,15 +1,17 @@
PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
PYTESTS = xfrout_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
check-local:
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -18,7 +20,9 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index adabf48..85979a0 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -20,9 +20,12 @@ import unittest
import os
from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
+import isc.config
from pydnspp import *
from xfrout import *
import xfrout
+import isc.log
+import isc.acl.dns
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
@@ -99,26 +102,34 @@ class TestXfroutSession(unittest.TestCase):
def message_has_tsig(self, msg):
return msg.get_tsig_record() is not None
- def create_request_data_with_tsig(self):
+ def create_request_data(self, with_tsig=False):
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), RRType.AXFR())
+ query_question = Question(Name("example.com"), RRClass.IN(),
+ RRType.AXFR())
msg.add_question(query_question)
renderer = MessageRenderer()
- tsig_ctx = MockTSIGContext(TSIG_KEY)
- msg.to_wire(renderer, tsig_ctx)
- reply_data = renderer.get_data()
- return reply_data
+ if with_tsig:
+ tsig_ctx = MockTSIGContext(TSIG_KEY)
+ msg.to_wire(renderer, tsig_ctx)
+ else:
+ msg.to_wire(renderer)
+ request_data = renderer.get_data()
+ return request_data
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
- #self.log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
- self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(), TSIGKeyRing())
- self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
+ TSIGKeyRing(), ('127.0.0.1', 12345),
+ # When not testing ACLs, simply accept
+ isc.acl.dns.REQUEST_LOADER.load(
+ [{"action": "ACCEPT"}]),
+ {})
+ self.mdata = self.create_request_data(False)
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
def test_parse_query_message(self):
@@ -126,17 +137,158 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_rcode.to_text(), "NOERROR")
# tsig signed query message
- request_data = self.create_request_data_with_tsig()
+ request_data = self.create_request_data(True)
# BADKEY
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
# NOERROR
- self.xfrsess._tsig_key_ring.add(TSIG_KEY)
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ def check_transfer_acl(self, acl_setter):
+ # ACL checks, put some ACL inside
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {
+ "from": "127.0.0.1",
+ "action": "ACCEPT"
+ },
+ {
+ "from": "192.0.2.1",
+ "action": "DROP"
+ }
+ ]))
+ # Localhost (the default in this test) is accepted
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # This should be dropped completely, therefore returning None
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(None, rcode)
+ # This should be refused, therefore REFUSED
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ rcode, msg = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # TSIG signed request
+ request_data = self.create_request_data(True)
+
+ # If the TSIG check fails, it should not check ACL
+ # (If it checked ACL as well, it would just drop the request)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ self.xfrsess._tsig_key_ring = TSIGKeyRing()
+ rcode, msg = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOTAUTH")
+ self.assertTrue(self.xfrsess._tsig_ctx is not None)
+
+ # ACL using TSIG: successful case
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ self.assertEqual(TSIGKeyRing.SUCCESS,
+ self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+
+ # ACL using TSIG: key name doesn't match; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # ACL using TSIG: no TSIG; should be rejected
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+ ]))
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ #
+ # ACL using IP + TSIG: both should match
+ #
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+ {"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
+ "action": "ACCEPT"},
+ {"action": "REJECT"}
+ ]))
+ # both matches
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "NOERROR")
+ # TSIG matches, but address doesn't
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Address matches, but TSIG doesn't (not included)
+ self.xfrsess._remote = ('192.0.2.1', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+ # Neither address nor TSIG matches
+ self.xfrsess._remote = ('192.0.2.2', 12345)
+ [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+ self.assertEqual(rcode.to_text(), "REFUSED")
+
+ def test_transfer_acl(self):
+ # ACL checks only with the default ACL
+ def acl_setter(acl):
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl(self):
+ # ACL check with a per zone ACL + default ACL. The per zone ACL
+ # should match the queried zone, so it should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.com.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl_nomatch(self):
+ # similar to the previous one, but the per zone doesn't match the
+ # query. The default should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.org.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+ isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_get_transfer_acl(self):
+ # set the default ACL. If there's no specific zone ACL, this one
+ # should be used.
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "ACCEPT"}])
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ self.assertEqual(acl, self.xfrsess._acl)
+
+ # install a per zone config with transfer ACL for example.com. Then
+ # that ACL will be used for example.com; for others the default ACL
+ # will still be used.
+ com_acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "REJECT"}])
+ self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+ self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+ com_acl
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('example.com'),
+ RRClass.IN()))
+ self.assertEqual(self.xfrsess._acl,
+ self.xfrsess._get_transfer_acl(Name('example.org'),
+ RRClass.IN()))
+
+ # Name matching should be case insensitive.
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+ RRClass.IN()))
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
@@ -195,20 +347,6 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(msg.get_rcode(), rcode)
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
- def test_reply_query_with_format_error(self):
- msg = self.getmsg()
- self.xfrsess._reply_query_with_format_error(msg, self.sock)
- get_msg = self.sock.read_msg()
- self.assertEqual(get_msg.get_rcode().to_text(), "FORMERR")
-
- # tsig signed message
- msg = self.getmsg()
- self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
- self.xfrsess._reply_query_with_format_error(msg, self.sock)
- get_msg = self.sock.read_msg()
- self.assertEqual(get_msg.get_rcode().to_text(), "FORMERR")
- self.assertTrue(self.message_has_tsig(get_msg))
-
def test_create_rrset_from_db_record(self):
rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
self.assertEqual(rrset.get_name().to_text(), "example.com.")
@@ -502,9 +640,11 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
-class MyCCSession():
+class MyCCSession(isc.config.ConfigData):
def __init__(self):
- pass
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
@@ -515,18 +655,42 @@ class MyCCSession():
class MyUnixSockServer(UnixSockServer):
def __init__(self):
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = threading.Event()
- self._max_transfers_out = 10
+ self._common_init()
self._cc = MyCCSession()
- #self._log = isc.log.NSLogger('xfrout', '', severity = 'critical', log_to_console = False )
+ self.update_config_data(self._cc.get_full_config())
class TestUnixSockServer(unittest.TestCase):
def setUp(self):
self.write_sock, self.read_sock = socket.socketpair()
self.unix = MyUnixSockServer()
+ def test_guess_remote(self):
+ """Test we can guess the remote endpoint when we have only the
+ file descriptor. This is needed, because we get only that one
+ from auth."""
+ # We test with UDP, as it can be "connected" without other
+ # endpoint
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ if socket.has_ipv6:
+ # Don't check IPv6 address on hosts not supporting them
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+ sock.connect(('::1', 12345))
+ self.assertEqual(('::1', 12345, 0, 0),
+ self.unix._guess_remote(sock.fileno()))
+ # Try when pretending there's no IPv6 support
+ # (No need to pretend when there's really no IPv6)
+ xfrout.socket.has_ipv6 = False
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.connect(('127.0.0.1', 12345))
+ self.assertEqual(('127.0.0.1', 12345),
+ self.unix._guess_remote(sock.fileno()))
+ # Return it back
+ xfrout.socket.has_ipv6 = True
+
def test_receive_query_message(self):
send_msg = b"\xd6=\x00\x00\x00\x01\x00"
msg_len = struct.pack('H', socket.htons(len(send_msg)))
@@ -535,15 +699,37 @@ class TestUnixSockServer(unittest.TestCase):
recv_msg = self.unix._receive_query_message(self.read_sock)
self.assertEqual(recv_msg, send_msg)
- def test_updata_config_data(self):
+ def check_default_ACL(self):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+
+ def check_loaded_ACL(self, acl):
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
+ context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
+ 1234, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP,
+ socket.AI_NUMERICHOST)[0][4])
+ self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
+
+ def test_update_config_data(self):
+ self.check_default_ACL()
tsig_key_str = 'example.com:SFuWd/q99SzF8Yzd1QbB9g=='
tsig_key_list = [tsig_key_str]
bad_key_list = ['bad..example.com:SFuWd/q99SzF8Yzd1QbB9g==']
self.unix.update_config_data({'transfers_out':10 })
self.assertEqual(self.unix._max_transfers_out, 10)
self.assertTrue(self.unix.tsig_key_ring is not None)
+ self.check_default_ACL()
- self.unix.update_config_data({'transfers_out':9, 'tsig_key_ring':tsig_key_list})
+ self.unix.update_config_data({'transfers_out':9,
+ 'tsig_key_ring':tsig_key_list})
self.assertEqual(self.unix._max_transfers_out, 9)
self.assertEqual(self.unix.tsig_key_ring.size(), 1)
self.unix.tsig_key_ring.remove(Name("example.com."))
@@ -554,6 +740,81 @@ class TestUnixSockServer(unittest.TestCase):
self.assertRaises(None, self.unix.update_config_data(config_data))
self.assertEqual(self.unix.tsig_key_ring.size(), 0)
+ # Load the ACL
+ self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]})
+ self.check_loaded_ACL(self.unix._acl)
+ # Pass a wrong data there and check it does not replace the old one
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'transfer_acl': ['Something bad']})
+ self.check_loaded_ACL(self.unix._acl)
+
+ def test_zone_config_data(self):
+ # By default, there's no specific zone config
+ self.assertEqual({}, self.unix._zone_config)
+
+ # Adding config for a specific zone. The config is empty unless
+ # explicitly specified.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'class': 'IN'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class can be omitted
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class, name are stored in the "normalized" form. class
+ # strings are upper cased, names are down cased.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'EXAMPLE.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # invalid zone class, name will result in exceptions
+ self.assertRaises(EmptyLabel,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'bad..example'}]})
+ self.assertRaises(InvalidRRClass,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'class': 'badclass'}]})
+
+ # Configuring a couple of more zones
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'},
+ {'origin': 'example.com',
+ 'class': 'CH'},
+ {'origin': 'example.org'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+ # Duplicate data: should be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com'},
+ {'origin': 'example.org'},
+ {'origin': 'example.com'}]})
+
+ def test_zone_config_data_with_acl(self):
+ # Similar to the previous test, but with transfer_acl config
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]}]})
+ acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+ self.check_loaded_ACL(acl)
+
+ # invalid ACL syntax will be rejected with exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'action': 'BADACTION'}]}]})
+
def test_get_db_file(self):
self.assertEqual(self.unix.get_db_file(), "initdb.file")
@@ -670,4 +931,5 @@ class TestInitialization(unittest.TestCase):
self.assertEqual(xfrout.UNIX_SOCKET_FILE, "The/Socket/File")
if __name__== "__main__":
+ isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index a75ff22..8049e29 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -35,7 +35,7 @@ import errno
from optparse import OptionParser, OptionValueError
from isc.util import socketserver_mixin
-from xfrout_messages import *
+from isc.log_messages.xfrout_messages import *
isc.log.init("b10-xfrout")
logger = isc.log.Logger("xfrout")
@@ -48,8 +48,23 @@ except ImportError as e:
# must keep running, so we warn about it and move forward.
log.error(XFROUT_IMPORT, str(e))
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
+from isc.acl.dns import REQUEST_LOADER
+
isc.util.process.rename()
+class XfroutConfigError(Exception):
+ """An exception indicating an error in updating xfrout configuration.
+
+ This exception is raised when the xfrout process encounters an error in
+ handling configuration updates. Not all syntax errors can be caught
+ at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+ validate the given configuration data itself. When it finds an error
+ it raises this exception (either directly or by converting an exception
+ from other modules) as a unified error in configuration.
+ """
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -76,14 +91,12 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
VERBOSE_MODE = False
# tsig sign every N axfr packets.
TSIG_SIGN_EVERY_NTH = 96
XFROUT_MAX_MESSAGE_SIZE = 65535
-
def get_rrset_len(rrset):
"""Returns the wire length of the given RRset"""
bytes = bytearray()
@@ -92,16 +105,17 @@ def get_rrset_len(rrset):
class XfroutSession():
- def __init__(self, sock_fd, request_data, server, tsig_key_ring):
- # The initializer for the superclass may call functions
- # that need _log to be set, so we set it first
+ def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
+ default_acl, zone_config):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
- #self._log = log
self._tsig_key_ring = tsig_key_ring
self._tsig_ctx = None
self._tsig_len = 0
+ self._remote = remote
+ self._acl = default_acl
+ self._zone_config = zone_config
self.handle()
def create_tsig_ctx(self, tsig_record, tsig_key_ring):
@@ -114,7 +128,7 @@ class XfroutSession():
self.dns_xfrout_start(self._sock_fd, self._request_data)
#TODO, avoid catching all exceptions
except Exception as e:
- logger.error(XFROUT_HANDLE_QUERY_ERROR, str(e))
+ logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
pass
os.close(self._sock_fd)
@@ -137,16 +151,50 @@ class XfroutSession():
try:
msg = Message(Message.PARSE)
Message.from_wire(msg, mdata)
-
- # TSIG related checks
- rcode = self._check_request_tsig(msg, mdata)
-
- except Exception as err:
- logger.error(XFROUT_PARSE_QUERY_ERROR, str(err))
+ except Exception as err: # Exception is too broad
+ logger.error(XFROUT_PARSE_QUERY_ERROR, err)
return Rcode.FORMERR(), None
+ # TSIG related checks
+ rcode = self._check_request_tsig(msg, mdata)
+
+ if rcode == Rcode.NOERROR():
+ # ACL checks
+ zone_name = msg.get_question()[0].get_name()
+ zone_class = msg.get_question()[0].get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return None, None
+ elif acl_result == REJECT:
+ logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return Rcode.REFUSED(), msg
+
return rcode, msg
+ def _get_transfer_acl(self, zone_name, zone_class):
+ '''Return the ACL that should be applied for a given zone.
+
+ The zone is identified by a tuple of name and RR class.
+ If a per zone configuration for the zone exists and contains
+ transfer_acl, that ACL will be used; otherwise, the default
+ ACL will be used.
+
+ '''
+ # Internally zone names are managed in lower cased label characters,
+ # so we first need to convert the name.
+ zone_name_lower = Name(zone_name.to_text(), True)
+ config_key = (zone_class.to_text(), zone_name_lower.to_text())
+ if config_key in self._zone_config and \
+ 'transfer_acl' in self._zone_config[config_key]:
+ return self._zone_config[config_key]['transfer_acl']
+ return self._acl
+
def _get_query_zone_name(self, msg):
question = msg.get_question()[0]
return question.get_name().to_text()
@@ -183,18 +231,11 @@ class XfroutSession():
def _reply_query_with_error_rcode(self, msg, sock_fd, rcode_):
- msg.make_response()
- msg.set_rcode(rcode_)
- self._send_message(sock_fd, msg, self._tsig_ctx)
-
-
- def _reply_query_with_format_error(self, msg, sock_fd):
- '''query message format isn't legal.'''
if not msg:
return # query message is invalid. send nothing back.
msg.make_response()
- msg.set_rcode(Rcode.FORMERR())
+ msg.set_rcode(rcode_)
self._send_message(sock_fd, msg, self._tsig_ctx)
def _zone_has_soa(self, zone):
@@ -244,10 +285,13 @@ class XfroutSession():
def dns_xfrout_start(self, sock_fd, msg_query):
rcode_, msg = self._parse_query_message(msg_query)
#TODO. create query message and parse header
- if rcode_ == Rcode.NOTAUTH():
+ if rcode_ is None: # Dropped by ACL
+ return
+ elif rcode_ == Rcode.NOTAUTH() or rcode_ == Rcode.REFUSED():
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
elif rcode_ != Rcode.NOERROR():
- return self._reply_query_with_format_error(msg, sock_fd)
+ return self._reply_query_with_error_rcode(msg, sock_fd,
+ Rcode.FORMERR())
zone_name = self._get_query_zone_name(msg)
zone_class_str = self._get_query_zone_class(msg)
@@ -257,7 +301,7 @@ class XfroutSession():
if rcode_ != Rcode.NOERROR():
logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
zone_class_str, rcode_.to_text())
- return self. _reply_query_with_error_rcode(msg, sock_fd, rcode_)
+ return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
@@ -367,21 +411,28 @@ class XfroutSession():
self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
count_since_last_tsig_sign)
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+ ThreadingUnixStreamServer):
'''The unix domain socket server which accept xfr query sent from auth server.'''
- def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
+ def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+ cc):
self._remove_unused_sock_file(sock_file)
self._sock_file = sock_file
socketserver_mixin.NoPollMixIn.__init__(self)
ThreadingUnixStreamServer.__init__(self, sock_file, handle_class)
- self._lock = threading.Lock()
- self._transfers_counter = 0
self._shutdown_event = shutdown_event
self._write_sock, self._read_sock = socket.socketpair()
- #self._log = log
- self.update_config_data(config_data)
+ self._common_init()
self._cc = cc
+ self.update_config_data(config_data)
+
+ def _common_init(self):
+ '''Initialization shared with the mock server class used for tests'''
+ self._lock = threading.Lock()
+ self._transfers_counter = 0
+ self._zone_config = {}
+ self._acl = None # this will be initialized in update_config_data()
def _receive_query_message(self, sock):
''' receive request message from sock'''
@@ -459,16 +510,41 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
if not request_data:
return
- t = threading.Thread(target = self.finish_request,
+ t = threading.Thread(target=self.finish_request,
args = (sock_fd, request_data))
if self.daemon_threads:
t.daemon = True
t.start()
+ def _guess_remote(self, sock_fd):
+ """
+ Guess remote address and port of the socket. The sock_fd must be a
+ socket
+ """
+ # This uses a trick. If the socket is IPv4 in reality and we pretend
+ # it to be IPv6, it returns IPv4 address anyway. This doesn't seem
+ # to care about the SOCK_STREAM parameter at all (which it really is,
+ # except for testing)
+ if socket.has_ipv6:
+ sock = socket.fromfd(sock_fd, socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ # To make it work even on hosts without IPv6 support
+ # (Any idea how to simulate this in test?)
+ sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
+ return sock.getpeername()
def finish_request(self, sock_fd, request_data):
- '''Finish one request by instantiating RequestHandlerClass.'''
- self.RequestHandlerClass(sock_fd, request_data, self, self.tsig_key_ring)
+ '''Finish one request by instantiating RequestHandlerClass.
+
+ This method creates a XfroutSession object.
+ '''
+ self._lock.acquire()
+ acl = self._acl
+ zone_config = self._zone_config
+ self._lock.release()
+ self.RequestHandlerClass(sock_fd, request_data, self,
+ self.tsig_key_ring,
+ self._guess_remote(sock_fd), acl, zone_config)
def _remove_unused_sock_file(self, sock_file):
'''Try to remove the socket file. If the file is being used
@@ -510,14 +586,65 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
pass
def update_config_data(self, new_config):
- '''Apply the new config setting of xfrout module. '''
- logger.info(XFROUT_NEW_CONFIG)
+ '''Apply the new config setting of xfrout module.
+
+ '''
self._lock.acquire()
- self._max_transfers_out = new_config.get('transfers_out')
- self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ try:
+ logger.info(XFROUT_NEW_CONFIG)
+ new_acl = self._acl
+ if 'transfer_acl' in new_config:
+ try:
+ new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl: ' +
+ str(e))
+
+ new_zone_config = self._zone_config
+ zconfig_data = new_config.get('zone_config')
+ if zconfig_data is not None:
+ new_zone_config = self.__create_zone_config(zconfig_data)
+
+ self._acl = new_acl
+ self._zone_config = new_zone_config
+ self._max_transfers_out = new_config.get('transfers_out')
+ self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ except Exception as e:
+ self._lock.release()
+ raise e
self._lock.release()
logger.info(XFROUT_NEW_CONFIG_DONE)
+ def __create_zone_config(self, zone_config_list):
+ new_config = {}
+ for zconf in zone_config_list:
+ # convert the class, origin (name) pair. First build pydnspp
+ # object to reject invalid input.
+ zclass_str = zconf.get('class')
+ if zclass_str is None:
+ #zclass_str = 'IN' # temporary
+ zclass_str = self._cc.get_default_value('zone_config/class')
+ zclass = RRClass(zclass_str)
+ zorigin = Name(zconf['origin'], True)
+ config_key = (zclass.to_text(), zorigin.to_text())
+
+ # reject duplicate config
+ if config_key in new_config:
+ raise XfroutConfigError('Duplicate zone_config for ' +
+ str(zorigin) + '/' + str(zclass))
+
+ # create a new config entry, build any given (and known) config
+ new_config[config_key] = {}
+ if 'transfer_acl' in zconf:
+ try:
+ new_config[config_key]['transfer_acl'] = \
+ REQUEST_LOADER.load(zconf['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl ' +
+ 'for ' + zorigin.to_text() + '/' +
+ zclass_str + ': ' + str(e))
+ return new_config
+
def set_tsig_key_ring(self, key_list):
"""Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -563,23 +690,21 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
class XfroutServer:
def __init__(self):
self._unix_socket_server = None
- #self._log = None
self._listen_sock_file = UNIX_SOCKET_FILE
self._shutdown_event = threading.Event()
- self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler, None, True)
+ self._cc = isc.config.ModuleCCSession(SPECFILE_LOCATION, self.config_handler, self.command_handler)
self._config_data = self._cc.get_full_config()
self._cc.start()
self._cc.add_remote_config(AUTH_SPECFILE_LOCATION);
- #self._log = isc.log.NSLogger(self._config_data.get('log_name'), self._config_data.get('log_file'),
- # self._config_data.get('log_severity'), self._config_data.get('log_versions'),
- # self._config_data.get('log_max_bytes'), True)
self._start_xfr_query_listener()
self._start_notifier()
def _start_xfr_query_listener(self):
'''Start a new thread to accept xfr query. '''
- self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
- self._shutdown_event, self._config_data,
+ self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+ XfroutSession,
+ self._shutdown_event,
+ self._config_data,
self._cc)
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
listener.start()
@@ -601,11 +726,13 @@ class XfroutServer:
continue
self._config_data[key] = new_config[key]
- #if self._log:
- # self._log.update_config(new_config)
-
if self._unix_socket_server:
- self._unix_socket_server.update_config_data(self._config_data)
+ try:
+ self._unix_socket_server.update_config_data(self._config_data)
+ except Exception as e:
+ answer = create_answer(1,
+ "Failed to handle new configuration: " +
+ str(e))
return answer
@@ -685,6 +812,10 @@ if '__main__' == __name__:
logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(XFROUT_CC_SESSION_ERROR, str(e))
+ except ModuleCCSessionError as e:
+ logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+ except XfroutConfigError as e:
+ logger.error(XFROUT_CONFIG_ERROR, str(e))
except SessionTimeout as e:
logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 2efa3d7..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -16,27 +16,27 @@
},
{
"item_name": "log_file",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
"item_default": "@@LOCALSTATEDIR@@/@PACKAGE@/log/Xfrout.log"
},
{
"item_name": "log_severity",
- "item_type": "string",
+ "item_type": "string",
"item_optional": false,
- "item_default": "debug"
+ "item_default": "debug"
},
{
"item_name": "log_versions",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 5
+ "item_default": 5
},
{
"item_name": "log_max_bytes",
- "item_type": "integer",
+ "item_type": "integer",
"item_optional": false,
- "item_default": 1048576
+ "item_default": 1048576
},
{
"item_name": "tsig_key_ring",
@@ -49,6 +49,57 @@
"item_type": "string",
"item_optional": true
}
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": false,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ },
+ {
+ "item_name": "zone_config",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec":
+ {
+ "item_name": "zone_config_element",
+ "item_type": "map",
+ "item_optional": true,
+ "item_default": { "origin": "" },
+ "map_item_spec": [
+ {
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 2dada54..b2e432c 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -47,8 +47,19 @@ a valid TSIG key.
There was a problem reading from the command and control channel. The
most likely cause is that the msgq daemon is not running.
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+
% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
-There was a problem reading a response from antoher module over the
+There was a problem reading a response from another module over the
command and control channel. The most likely cause is that the
configuration manager b10-cfgmgr is not running.
@@ -95,6 +106,17 @@ in the log message, but at this point no specific information other
than that could be given. This points to incomplete exception handling
in the code.
+% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
+The xfrout process silently dropped a request to transfer a zone to the given
+host, as required by the configured ACLs. The %1 and %2 represent the zone name
+and class, the %3 and %4 the IP address and port of the peer requesting the
+transfer.
+
+% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+The xfrout process rejected (with the REFUSED rcode) a request to transfer a
+zone to the given host. This is due to the configured ACLs. The %1 and %2
+represent the zone name and class, the %3 and %4 the IP address and port of
+the peer requesting the transfer.
+
% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
There was an error receiving the file descriptor for the transfer
request. Normally, the request is received by b10-auth, and passed on
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 8ab5f7a..aa427fd 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-zonemgr
b10_zonemgrdir = $(pkgdatadir)
b10_zonemgr_DATA = zonemgr.spec
-CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.pyc
man_MANS = b10-zonemgr.8
-EXTRA_DIST = $(man_MANS) b10-zonemgr.xml
+EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
if ENABLE_MAN
@@ -19,10 +24,15 @@ b10-zonemgr.8: b10-zonemgr.xml
endif
+# Build logging source file from message files
+$(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py : zonemgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/zonemgr_messages.mes
+
zonemgr.spec: zonemgr.spec.pre
$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
-b10-zonemgr: zonemgr.py
+b10-zonemgr: zonemgr.py $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
chmod a+x $@
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 97f9b5e..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -7,7 +7,7 @@ CLEANFILES = initdb.file
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -20,6 +20,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index 496ce6b..80e41b3 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -152,6 +152,16 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertTrue((time1 + 3600 * (1 - self.zone_refresh._refresh_jitter)) <= zone_timeout)
self.assertTrue(zone_timeout <= time2 + 3600)
+ # No soa rdata
+ self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_soa_rdata"] = None
+ time3 = time.time()
+ self.zone_refresh._set_zone_retry_timer(ZONE_NAME_CLASS1_IN)
+ zone_timeout = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["next_refresh_time"]
+ time4 = time.time()
+ self.assertTrue((time3 + self.zone_refresh._lowerbound_retry * (1 - self.zone_refresh._refresh_jitter))
+ <= zone_timeout)
+ self.assertTrue(zone_timeout <= time4 + self.zone_refresh._lowerbound_retry)
+
def test_zone_not_exist(self):
self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_IN))
self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_CH))
@@ -304,8 +314,8 @@ class TestZonemgrRefresh(unittest.TestCase):
def get_zone_soa2(zone_name, db_file):
return None
sqlite3_ds.get_zone_soa = get_zone_soa2
- self.assertRaises(ZonemgrException, self.zone_refresh.zonemgr_add_zone, \
- ZONE_NAME_CLASS1_IN)
+ self.zone_refresh.zonemgr_add_zone(ZONE_NAME_CLASS2_IN)
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS2_IN]["zone_soa_rdata"] is None)
sqlite3_ds.get_zone_soa = old_get_zone_soa
def test_zone_handle_notify(self):
@@ -362,6 +372,15 @@ class TestZonemgrRefresh(unittest.TestCase):
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_CH)
self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_IN)
+ old_get_zone_soa = sqlite3_ds.get_zone_soa
+ def get_zone_soa(zone_name, db_file):
+ return None
+ sqlite3_ds.get_zone_soa = get_zone_soa
+ self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
+ self.assertEqual(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"],
+ ZONE_EXPIRED)
+ sqlite3_ds.get_zone_soa = old_get_zone_soa
+
def test_find_need_do_refresh_zone(self):
time1 = time.time()
self.zone_refresh._zonemgr_refresh_info = {
@@ -440,6 +459,8 @@ class TestZonemgrRefresh(unittest.TestCase):
"class": "IN" } ]
}
self.zone_refresh.update_config_data(config_data)
+ self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh._zonemgr_refresh_info)
# update all values
config_data = {
@@ -479,14 +500,16 @@ class TestZonemgrRefresh(unittest.TestCase):
"secondary_zones": [ { "name": "doesnotexist",
"class": "IN" } ]
}
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data,
- config_data)
- self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
- self.assertEqual(30, self.zone_refresh._lowerbound_retry)
- self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
- self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
- self.assertEqual(0.35, self.zone_refresh._reload_jitter)
+ self.zone_refresh.update_config_data(config_data)
+ name_class = ("doesnotexist.", "IN")
+ self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ # The other configs should be updated successfully
+ self.assertEqual(61, self.zone_refresh._lowerbound_refresh)
+ self.assertEqual(31, self.zone_refresh._lowerbound_retry)
+ self.assertEqual(19801, self.zone_refresh._max_transfer_timeout)
+ self.assertEqual(0.21, self.zone_refresh._refresh_jitter)
+ self.assertEqual(0.71, self.zone_refresh._reload_jitter)
# Make sure we accept 0 as a value
config_data = {
@@ -526,10 +549,11 @@ class TestZonemgrRefresh(unittest.TestCase):
self.zone_refresh._zonemgr_refresh_info)
# This one does not exist
config.set_zone_list_from_name_classes(["example.net", "CH"])
- self.assertRaises(ZonemgrException,
- self.zone_refresh.update_config_data, config)
- # So it should not affect the old ones
- self.assertTrue(("example.net.", "IN") in
+ self.zone_refresh.update_config_data(config)
+ self.assertFalse(("example.net.", "CH") in
+ self.zone_refresh._zonemgr_refresh_info)
+ # Simply skip loading the SOA for the zone; the other configs should be updated successfully
+ self.assertFalse(("example.net.", "IN") in
self.zone_refresh._zonemgr_refresh_info)
# Make sure it works even when we "accidentally" forget the final dot
config.set_zone_list_from_name_classes([("example.net", "IN")])
@@ -596,15 +620,18 @@ class TestZonemgr(unittest.TestCase):
config_data3 = {"refresh_jitter" : 0.7}
self.zonemgr.config_handler(config_data3)
self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
- # The zone doesn't exist in database, it should be rejected
+ # The zone doesn't exist in the database; simply skip loading the SOA for it and log a warning
self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
config_data1)
config_data1["secondary_zones"] = [{"name": "nonexistent.example",
"class": "IN"}]
- self.assertNotEqual(self.zonemgr.config_handler(config_data1),
- {"result": [0]})
- # As it is rejected, the old value should be kept
- self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
+ self.assertEqual(self.zonemgr.config_handler(config_data1),
+ {"result": [0]})
+ # other configs should be updated successfully
+ name_class = ("nonexistent.example.", "IN")
+ self.assertTrue(self.zonemgr._zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+ is None)
+ self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
def test_get_db_file(self):
self.assertEqual("initdb.file", self.zonemgr.get_db_file())
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index c6e3163..5c8d9b5 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,6 +37,16 @@ from isc.datasrc import sqlite3_ds
from optparse import OptionParser, OptionValueError
from isc.config.ccsession import *
import isc.util.process
+from isc.log_messages.zonemgr_messages import *
+
+# Initialize logging for called modules.
+isc.log.init("b10-zonemgr")
+logger = isc.log.Logger("zonemgr")
+
+# Constants for debug levels, to be removed when we have #1074.
+DBG_START_SHUT = 0
+DBG_ZONEMGR_COMMAND = 10
+DBG_ZONEMGR_BASIC = 40
isc.util.process.rename()
@@ -77,13 +87,6 @@ REFRESH_OFFSET = 3
RETRY_OFFSET = 4
EXPIRED_OFFSET = 5
-# verbose mode
-VERBOSE_MODE = False
-
-def log_msg(msg):
- if VERBOSE_MODE:
- sys.stdout.write("[b10-zonemgr] %s\n" % str(msg))
-
class ZonemgrException(Exception):
pass
@@ -93,7 +96,6 @@ class ZonemgrRefresh:
do zone refresh.
Zone timers can be started by calling run_timer(), and it
can be stopped by calling shutdown() in another thread.
-
"""
def __init__(self, cc, db_file, slave_socket, config_data):
@@ -140,7 +142,10 @@ class ZonemgrRefresh:
"""Set zone next refresh time after zone refresh fail.
now + retry - retry_jitter <= next_refresh_time <= now + retry
"""
- zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ if (self._get_zone_soa_rdata(zone_name_class) is not None):
+ zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+ else:
+ zone_retry_time = 0.0
zone_retry_time = max(self._lowerbound_retry, zone_retry_time)
self._set_zone_timer(zone_name_class, zone_retry_time, self._refresh_jitter * zone_retry_time)
@@ -157,6 +162,7 @@ class ZonemgrRefresh:
def zone_refresh_success(self, zone_name_class):
"""Update zone info after zone refresh success"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_SUCCESS, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
self.zonemgr_reload_zone(zone_name_class)
@@ -167,10 +173,12 @@ class ZonemgrRefresh:
def zone_refresh_fail(self, zone_name_class):
"""Update zone info after zone refresh fail"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_FAIL, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
"belong to zonemgr" % zone_name_class)
# Is zone expired?
- if (self._zone_is_expired(zone_name_class)):
+ if ((self._get_zone_soa_rdata(zone_name_class) is None) or
+ self._zone_is_expired(zone_name_class)):
self._set_zone_state(zone_name_class, ZONE_EXPIRED)
else:
self._set_zone_state(zone_name_class, ZONE_OK)
@@ -179,6 +187,7 @@ class ZonemgrRefresh:
def zone_handle_notify(self, zone_name_class, master):
"""Handle zone notify"""
if (self._zone_not_exist(zone_name_class)):
+ logger.error(ZONEMGR_UNKNOWN_ZONE_NOTIFIED, zone_name_class[0], zone_name_class[1])
raise ZonemgrException("[b10-zonemgr] Notified zone (%s, %s) "
"doesn't belong to zonemgr" % zone_name_class)
self._set_zone_notifier_master(zone_name_class, master)
@@ -191,19 +200,23 @@ class ZonemgrRefresh:
def zonemgr_add_zone(self, zone_name_class):
""" Add a zone into zone manager."""
- log_msg("Loading zone (%s, %s)" % zone_name_class)
+
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
zone_info = {}
zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
- if not zone_soa:
- raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
- zone_info["zone_soa_rdata"] = zone_soa[7]
+ if zone_soa is None:
+ logger.warn(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
+ zone_info["zone_soa_rdata"] = None
+ zone_reload_time = 0.0
+ else:
+ zone_info["zone_soa_rdata"] = zone_soa[7]
+ zone_reload_time = float(zone_soa[7].split(" ")[RETRY_OFFSET])
zone_info["zone_state"] = ZONE_OK
zone_info["last_refresh_time"] = self._get_current_time()
self._zonemgr_refresh_info[zone_name_class] = zone_info
# Imposes some random jitters to avoid many zones need to do refresh at the same time.
- zone_reload_jitter = float(zone_soa[7].split(" ")[RETRY_OFFSET])
- zone_reload_jitter = max(self._lowerbound_retry, zone_reload_jitter)
- self._set_zone_timer(zone_name_class, zone_reload_jitter, self._reload_jitter * zone_reload_jitter)
+ zone_reload_time = max(self._lowerbound_retry, zone_reload_time)
+ self._set_zone_timer(zone_name_class, zone_reload_time, self._reload_jitter * zone_reload_time)
def _zone_is_expired(self, zone_name_class):
"""Judge whether a zone is expired or not."""
@@ -265,7 +278,7 @@ class ZonemgrRefresh:
except isc.cc.session.SessionTimeout:
pass # for now we just ignore the failure
except socket.error:
- sys.stderr.write("[b10-zonemgr] Failed to send to module %s, the session has been closed." % module_name)
+ logger.error(ZONEMGR_SEND_FAIL, module_name)
def _find_need_do_refresh_zone(self):
"""Find the first zone need do refresh, if no zone need
@@ -274,7 +287,8 @@ class ZonemgrRefresh:
zone_need_refresh = None
for zone_name_class in self._zonemgr_refresh_info.keys():
zone_state = self._get_zone_state(zone_name_class)
- # If hasn't received refresh response but are within refresh timeout, skip the zone
+ # If a refresh response hasn't been received but we are still within
+ # the refresh timeout, skip the zone
if (ZONE_REFRESHING == zone_state and
(self._get_zone_refresh_timeout(zone_name_class) > self._get_current_time())):
continue
@@ -294,7 +308,7 @@ class ZonemgrRefresh:
def _do_refresh(self, zone_name_class):
"""Do zone refresh."""
- log_msg("Do refresh for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_REFRESH_ZONE, zone_name_class[0], zone_name_class[1])
self._set_zone_state(zone_name_class, ZONE_REFRESHING)
self._set_zone_refresh_timeout(zone_name_class, self._get_current_time() + self._max_transfer_timeout)
notify_master = self._get_zone_notifier_master(zone_name_class)
@@ -351,7 +365,7 @@ class ZonemgrRefresh:
if e.args[0] == errno.EINTR:
(rlist, wlist, xlist) = ([], [], [])
else:
- sys.stderr.write("[b10-zonemgr] Error with select(); %s\n" % e)
+ logger.error(ZONEMGR_SELECT_ERROR, e);
break
for fd in rlist:
@@ -365,12 +379,14 @@ class ZonemgrRefresh:
def run_timer(self, daemon=False):
"""
- Keep track of zone timers. Spawns and starts a thread. The thread object is returned.
+ Keep track of zone timers. Spawns and starts a thread. The thread object
+ is returned.
You can stop it by calling shutdown().
"""
# Small sanity check
if self._running:
+ logger.error(ZONEMGR_TIMER_THREAD_RUNNING)
raise RuntimeError("Trying to run the timers twice at the same time")
# Prepare the launch
@@ -395,6 +411,7 @@ class ZonemgrRefresh:
called from a different thread.
"""
if not self._running:
+ logger.error(ZONEMGR_NO_TIMER_THREAD)
raise RuntimeError("Trying to shutdown, but not running")
# Ask the thread to stop
@@ -409,12 +426,6 @@ class ZonemgrRefresh:
def update_config_data(self, new_config):
""" update ZonemgrRefresh config """
- # TODO: we probably want to store all this info in a nice
- # class, so that we don't have to backup and restore every
- # single value.
- # TODO2: We also don't use get_default_value yet
- backup = self._zonemgr_refresh_info.copy()
-
# Get a new value, but only if it is defined (commonly used below)
# We don't use "value or default", because if value would be
# 0, we would take default
@@ -424,26 +435,21 @@ class ZonemgrRefresh:
else:
return default
- # store the values so we can restore them if there is a problem
- lowerbound_refresh_backup = self._lowerbound_refresh
self._lowerbound_refresh = val_or_default(
new_config.get('lowerbound_refresh'), self._lowerbound_refresh)
- lowerbound_retry_backup = self._lowerbound_retry
self._lowerbound_retry = val_or_default(
new_config.get('lowerbound_retry'), self._lowerbound_retry)
- max_transfer_timeout_backup = self._max_transfer_timeout
self._max_transfer_timeout = val_or_default(
new_config.get('max_transfer_timeout'), self._max_transfer_timeout)
- refresh_jitter_backup = self._refresh_jitter
self._refresh_jitter = val_or_default(
new_config.get('refresh_jitter'), self._refresh_jitter)
- reload_jitter_backup = self._reload_jitter
self._reload_jitter = val_or_default(
new_config.get('reload_jitter'), self._reload_jitter)
+
try:
required = {}
secondary_zones = new_config.get('secondary_zones')
@@ -458,6 +464,7 @@ class ZonemgrRefresh:
required[name_class] = True
# Add it only if it isn't there already
if not name_class in self._zonemgr_refresh_info:
+ # If we are not able to find it in the database, log a warning
self.zonemgr_add_zone(name_class)
# Drop the zones that are no longer there
# Do it in two phases, python doesn't like deleting while iterating
@@ -467,14 +474,7 @@ class ZonemgrRefresh:
to_drop.append(old_zone)
for drop in to_drop:
del self._zonemgr_refresh_info[drop]
- # If we are not able to find it in database, restore the original
except:
- self._zonemgr_refresh_info = backup
- self._lowerbound_refresh = lowerbound_refresh_backup
- self._lowerbound_retry = lowerbound_retry_backup
- self._max_transfer_timeout = max_transfer_timeout_backup
- self._refresh_jitter = refresh_jitter_backup
- self._reload_jitter = reload_jitter_backup
raise
class Zonemgr:
@@ -515,8 +515,8 @@ class Zonemgr:
return db_file
def shutdown(self):
- """Shutdown the zonemgr process. the thread which is keeping track of zone
- timers should be terminated.
+ """Shutdown the zonemgr process. The thread which is keeping track of
+ zone timers should be terminated.
"""
self._zone_refresh.shutdown()
@@ -556,17 +556,17 @@ class Zonemgr:
# jitter should not be bigger than half of the original value
if config_data.get('refresh_jitter') > 0.5:
config_data['refresh_jitter'] = 0.5
- log_msg("[b10-zonemgr] refresh_jitter is too big, its value will "
- "be set to 0.5")
-
+ logger.warn(ZONEMGR_JITTER_TOO_BIG)
def _parse_cmd_params(self, args, command):
zone_name = args.get("zone_name")
if not zone_name:
+ logger.error(ZONEMGR_NO_ZONE_NAME)
raise ZonemgrException("zone name should be provided")
zone_class = args.get("zone_class")
if not zone_class:
+ logger.error(ZONEMGR_NO_ZONE_CLASS)
raise ZonemgrException("zone class should be provided")
if (command != ZONE_NOTIFY_COMMAND):
@@ -574,6 +574,7 @@ class Zonemgr:
master_str = args.get("master")
if not master_str:
+ logger.error(ZONEMGR_NO_MASTER_ADDRESS)
raise ZonemgrException("master address should be provided")
return ((zone_name, zone_class), master_str)
@@ -581,15 +582,16 @@ class Zonemgr:
def command_handler(self, command, args):
"""Handle command received from command channel.
- ZONE_NOTIFY_COMMAND is issued by Auth process; ZONE_XFRIN_SUCCESS_COMMAND
- and ZONE_XFRIN_FAILED_COMMAND are issued by Xfrin process; shutdown is issued
- by a user or Boss process. """
+ ZONE_NOTIFY_COMMAND is issued by Auth process;
+ ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by
+ Xfrin process;
+ shutdown is issued by a user or Boss process. """
answer = create_answer(0)
if command == ZONE_NOTIFY_COMMAND:
""" Handle Auth notify command"""
# master is the source sender of the notify message.
zone_name_class, master = self._parse_cmd_params(args, command)
- log_msg("Received notify command for zone (%s, %s)." % zone_name_class)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_handle_notify(zone_name_class, master)
# Send notification to zonemgr timer thread
@@ -598,6 +600,7 @@ class Zonemgr:
elif command == ZONE_XFRIN_SUCCESS_COMMAND:
""" Handle xfrin success command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_success(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readable
@@ -605,14 +608,17 @@ class Zonemgr:
elif command == ZONE_XFRIN_FAILED_COMMAND:
""" Handle xfrin fail command"""
zone_name_class = self._parse_cmd_params(args, command)
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1])
with self._lock:
self._zone_refresh.zone_refresh_fail(zone_name_class)
self._master_socket.send(b" ")# make self._slave_socket readable
elif command == "shutdown":
+ logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_SHUTDOWN)
self.shutdown()
else:
+ logger.warn(ZONEMGR_RECEIVE_UNKNOWN, str(command))
answer = create_answer(1, "Unknown command:" + str(command))
return answer
@@ -639,25 +645,29 @@ def set_cmd_options(parser):
if '__main__' == __name__:
try:
+ logger.debug(DBG_START_SHUT, ZONEMGR_STARTING)
parser = OptionParser()
set_cmd_options(parser)
(options, args) = parser.parse_args()
- VERBOSE_MODE = options.verbose
+ if options.verbose:
+ logger.set_severity("DEBUG", 99)
set_signal_handler()
zonemgrd = Zonemgr()
zonemgrd.run()
except KeyboardInterrupt:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process\n")
+ logger.info(ZONEMGR_KEYBOARD_INTERRUPT)
+
except isc.cc.session.SessionError as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the command channel daemon running?\n")
+ logger.error(ZONEMGR_SESSION_ERROR)
+
except isc.cc.session.SessionTimeout as e:
- sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
- "is the configuration manager running?\n")
+ logger.error(ZONEMGR_SESSION_TIMEOUT)
+
except isc.config.ModuleCCSessionError as e:
- sys.stderr.write("[b10-zonemgr] exit zonemgr process: %s\n" % str(e))
+ logger.error(ZONEMGR_CCSESSION_ERROR, str(e))
if zonemgrd and zonemgrd.running:
zonemgrd.shutdown()
+ logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
new file mode 100644
index 0000000..8abec5d
--- /dev/null
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -0,0 +1,145 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the zonemgr messages python module.
+
+% ZONEMGR_CCSESSION_ERROR command channel session error: %1
+An error was encountered on the command channel. The message indicates
+the nature of the error.
+
+% ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+
+% ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+
+% ZONEMGR_LOAD_ZONE loading zone %1 (class %2)
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+
+% ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received. This may be due to an internal programming error; please
+submit a bug report.
+
+% ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record. The zone is still
+managed, but the timers normally derived from the SOA record fall back to
+the configured lower-bound values.
+
+% ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running. This may indicate an
+internal program error. Please submit a bug report.
+
+% ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel. The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+
+% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+
+% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel. The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error. Please submit a bug report.
+
+% ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel. The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+
+% ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel. The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+
+% ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+
+% ZONEMGR_SELECT_ERROR error with select(): %1
+An attempt to wait for input from a socket failed. The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+
+% ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed. The session between the modules has been closed.
+
+% ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon. The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SHUTDOWN zone manager has shut down
+A debug message, output when the zone manager has shut down completely.
+
+% ZONEMGR_STARTING zone manager starting
+A debug message output when the zone manager starts up.
+
+% ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running. It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer. Please submit
+a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager. This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager. This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case). Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager. This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index a4fea30..8a4c7c1 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -3,7 +3,7 @@
debug
missingInclude
// This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:60
+unreadVariable:src/lib/dns/rdata/template.cc:61
// Intentional self assignment tests. Suppress warning about them.
selfAssignment:src/lib/dns/tests/name_unittest.cc:293
selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index f4bef6b..04eee45 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config python xfr \
- bench asiolink asiodns nsas cache resolve testutils datasrc \
- acl server_common
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+ asiolink asiodns nsas cache resolve testutils datasrc \
+ server_common python
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index f211025..92b7869 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -19,7 +19,7 @@ libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
# DNS specialized one
lib_LTLIBRARIES += libdnsacl.la
-libdnsacl_la_SOURCES = dns.h dns.cc
+libdnsacl_la_SOURCES = dns.h dns.cc dnsname_check.h
libdnsacl_la_LIBADD = libacl.la
libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
diff --git a/src/lib/acl/acl.h b/src/lib/acl/acl.h
index 998b2b0..76039c9 100644
--- a/src/lib/acl/acl.h
+++ b/src/lib/acl/acl.h
@@ -88,8 +88,11 @@ public:
* the context against conditions and if it matches, returns the
* action that belongs to the first matched entry or default action
* if nothing matches.
+ *
* \param context The thing that should be checked. It is directly
* passed to the checks.
+ *
+ * \return The action for the ACL entry that first matches the context.
*/
const Action& execute(const Context& context) const {
const typename Entries::const_iterator end(entries_.end());
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
index 16f1bf5..b9cf91f 100644
--- a/src/lib/acl/dns.cc
+++ b/src/lib/acl/dns.cc
@@ -12,20 +12,126 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include "dns.h"
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/tsigrecord.h>
+
+#include <cc/data.h>
+
+#include <acl/dns.h>
+#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
+#include <acl/loader.h>
+#include <acl/logic_check.h>
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns;
+using namespace isc::data;
namespace isc {
namespace acl {
+
+/// The specialization of \c IPCheck for access control with \c RequestContext.
+///
+/// It returns \c true if the remote (source) IP address of the request
+/// matches the expression encapsulated in the \c IPCheck, and returns
+/// \c false if not.
+template <>
+bool
+IPCheck<dns::RequestContext>::matches(
+ const dns::RequestContext& request) const
+{
+ return (compare(request.remote_address.getData(),
+ request.remote_address.getFamily()));
+}
+
namespace dns {
-Loader&
-getLoader() {
- static Loader* loader(NULL);
+/// The specialization of \c NameCheck for access control with
+/// \c RequestContext.
+///
+/// It returns \c true if the request contains a TSIG record and its key
+/// (owner) name is equal to the name stored in the check; otherwise
+/// it returns \c false.
+template<>
+bool
+NameCheck<RequestContext>::matches(const RequestContext& request) const {
+ return (request.tsig != NULL && request.tsig->getName() == name_);
+}
+
+vector<string>
+internal::RequestCheckCreator::names() const {
+ // Probably we should eventually build this vector in a more
+ // sophisticated way. For now, it's simple enough to hardcode
+ // everything.
+ vector<string> supported_names;
+ supported_names.push_back("from");
+ supported_names.push_back("key");
+ return (supported_names);
+}
+
+shared_ptr<RequestCheck>
+internal::RequestCheckCreator::create(const string& name,
+ ConstElementPtr definition,
+ // unused:
+ const acl::Loader<RequestContext>&)
+{
+ if (!definition) {
+ isc_throw(LoaderError,
+ "NULL pointer is passed to RequestCheckCreator");
+ }
+
+ if (name == "from") {
+ return (shared_ptr<internal::RequestIPCheck>(
+ new internal::RequestIPCheck(definition->stringValue())));
+ } else if (name == "key") {
+ return (shared_ptr<internal::RequestKeyCheck>(
+ new internal::RequestKeyCheck(
+ Name(definition->stringValue()))));
+ } else {
+ // This case shouldn't happen (normally) as it should have been
+ // rejected at the loader level. But we explicitly catch the case
+ // and throw an exception for that.
+ isc_throw(LoaderError, "Invalid check name for RequestCheck: " <<
+ name);
+ }
+}
+
+RequestLoader&
+getRequestLoader() {
+ static RequestLoader* loader(NULL);
if (loader == NULL) {
- loader = new Loader(REJECT);
- // TODO: This is the place where we register default check creators
- // like IP check, etc, once we have them.
+ // Creator registration may throw, so we first store the new loader
+ // in an auto pointer in order to provide the strong exception
+ // guarantee.
+ auto_ptr<RequestLoader> loader_ptr =
+ auto_ptr<RequestLoader>(new RequestLoader(REJECT));
+
+ // Register default check creator(s)
+ loader_ptr->registerCreator(shared_ptr<internal::RequestCheckCreator>(
+ new internal::RequestCheckCreator()));
+ loader_ptr->registerCreator(
+ shared_ptr<NotCreator<RequestContext> >(
+ new NotCreator<RequestContext>("NOT")));
+ loader_ptr->registerCreator(
+ shared_ptr<LogicCreator<AnyOfSpec, RequestContext> >(
+ new LogicCreator<AnyOfSpec, RequestContext>("ANY")));
+ loader_ptr->registerCreator(
+ shared_ptr<LogicCreator<AllOfSpec, RequestContext> >(
+ new LogicCreator<AllOfSpec, RequestContext>("ALL")));
+
+ // From this point there shouldn't be any exception thrown
+ loader = loader_ptr.release();
}
+
return (*loader);
}
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
index 6f36e51..426c961 100644
--- a/src/lib/acl/dns.h
+++ b/src/lib/acl/dns.h
@@ -13,14 +13,23 @@
// PERFORMANCE OF THIS SOFTWARE.
#ifndef ACL_DNS_H
-#define ACL_DNS_H
+#define ACL_DNS_H 1
-#include "loader.h"
+#include <string>
+#include <vector>
-#include <asiolink/io_address.h>
-#include <dns/message.h>
+#include <boost/shared_ptr.hpp>
+
+#include <cc/data.h>
+
+#include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
+#include <acl/loader.h>
namespace isc {
+namespace dns {
+class TSIGRecord;
+}
namespace acl {
namespace dns {
@@ -30,47 +39,74 @@ namespace dns {
* This plays the role of Context of the generic template ACLs (in namespace
* isc::acl).
*
- * It is simple structure holding just the bunch of information. Therefore
- * the names don't end up with a slash, there are no methods so they can't be
- * confused with local variables.
+ * It is a simple structure holding just the bunch of information. Therefore
+ * the names don't end up with an underscore; there are no methods so they
+ * can't be confused with local variables.
+ *
+ * This structure is generally expected to be ephemeral and read-only: It
+ * would be constructed immediately before a particular ACL is checked
+ * and used only for the ACL match purposes. Due to this nature, and since
+ * ACL processing is often performance sensitive (typically it's performed
+ * against all incoming packets), the construction is designed to be
+ * lightweight: it tries to avoid expensive data copies or dynamic memory
+ * allocation as much as possible. Specifically, the constructor can
+ * take a pointer or reference to an object and keeps it as a reference
+ * (not making a local copy). This also means the caller is responsible for
+ * keeping the passed parameters valid while this structure is used.
+ * This should generally be reasonable as this structure is expected to be
+ * used only for a very short period as stated above.
*
- * \todo Do we want a constructor to set this in a shorter manner? So we can
- * call the ACLs directly?
+ * Based on the minimalist philosophy, the initial implementation only
+ * maintains the remote (source) IP address of the request and (optionally)
+ * the TSIG record included in the request. We may add more parameters of
+ * the request as we see the need for them. Possible additional parameters
+ * are the local (destination) IP address, the remote and local port numbers,
+ * various fields of the DNS request (e.g. a particular header flag value).
*/
struct RequestContext {
- /// \brief The DNS message (payload).
- isc::dns::ConstMessagePtr message;
- /// \brief The remote IP address (eg. the client).
- asiolink::IOAddress remote_address;
- /// \brief The local IP address (ours, of the interface where we received).
- asiolink::IOAddress local_address;
- /// \brief The remote port.
- uint16_t remote_port;
- /// \brief The local port.
- uint16_t local_port;
- /**
- * \brief Name of the TSIG key the message is signed with.
- *
- * This will be either the name of the TSIG key the message is signed with,
- * or empty string, if the message is not signed. It is true we could get
- * the information from the message itself, but because at the time when
- * the ACL is checked, the signature has been verified already, so passing
- * it around is probably cheaper.
- *
- * It is expected that messages with invalid signatures are handled before
- * ACL.
- */
- std::string tsig_key_name;
+ /// The constructor
+ ///
+    /// This is a trivial constructor that performs straightforward
+ /// initialization of the member variables from the given parameters.
+ ///
+ /// \exception None
+ ///
+    /// \param remote_address_param The remote IP address
+    /// \param tsig_param A valid pointer to the TSIG record included in
+    /// the request or NULL if the request doesn't contain a TSIG.
+ RequestContext(const IPAddress& remote_address_param,
+ const isc::dns::TSIGRecord* tsig_param) :
+ remote_address(remote_address_param),
+ tsig(tsig_param)
+ {}
+
+ ///
+ /// \name Parameter variables
+ ///
+ /// These member variables must be immutable so that the integrity of
+ /// the structure is kept throughout its lifetime. The easiest way is
+ /// to declare the variable as const. If it's not possible for a
+ /// particular variable, it must be defined as private and accessible
+ /// only via an accessor method.
+ //@{
+ /// \brief The remote IP address (eg. the client's IP address).
+ const IPAddress& remote_address;
+
+ /// \brief The TSIG record included in the request message, if any.
+ ///
+ /// If the request doesn't include a TSIG, this member will be NULL.
+ const isc::dns::TSIGRecord* const tsig;
+ //@}
};
/// \brief DNS based check.
-typedef acl::Check<RequestContext> Check;
+typedef acl::Check<RequestContext> RequestCheck;
/// \brief DNS based compound check.
typedef acl::CompoundCheck<RequestContext> CompoundCheck;
/// \brief DNS based ACL.
-typedef acl::ACL<RequestContext> ACL;
+typedef acl::ACL<RequestContext> RequestACL;
/// \brief DNS based ACL loader.
-typedef acl::Loader<RequestContext> Loader;
+typedef acl::Loader<RequestContext> RequestLoader;
/**
* \brief Loader singleton access function.
@@ -80,10 +116,39 @@ typedef acl::Loader<RequestContext> Loader;
* one is enough, this one will have registered default checks and it
 * is a known one, so any plugins can register additional checks as well.
*/
-Loader& getLoader();
+RequestLoader& getRequestLoader();
-}
-}
-}
+// The following is essentially private to the implementation and could
+// be hidden in the implementation file. But it's visible via this header
+// file for testing purposes. They are not supposed to be used by normal
+// applications directly, and to signal the intent, they are given inside
+// a separate namespace.
+namespace internal {
+
+// Shortcut typedef
+typedef isc::acl::IPCheck<RequestContext> RequestIPCheck;
+typedef isc::acl::dns::NameCheck<RequestContext> RequestKeyCheck;
+
+class RequestCheckCreator : public acl::Loader<RequestContext>::CheckCreator {
+public:
+ virtual std::vector<std::string> names() const;
+
+ virtual boost::shared_ptr<RequestCheck>
+ create(const std::string& name, isc::data::ConstElementPtr definition,
+ const acl::Loader<RequestContext>& loader);
+
+ /// Until we are sure how the various rules work for this case, we won't
+ /// allow unexpected special interpretation for list definitions.
+ virtual bool allowListAbbreviation() const { return (false); }
+};
+} // end of namespace "internal"
+
+} // end of namespace "dns"
+} // end of namespace "acl"
+} // end of namespace "isc"
#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/dnsname_check.h b/src/lib/acl/dnsname_check.h
new file mode 100644
index 0000000..7498d99
--- /dev/null
+++ b/src/lib/acl/dnsname_check.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DNSNAME_CHECK_H
+#define __DNSNAME_CHECK_H 1
+
+#include <dns/name.h>
+
+#include <acl/check.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/// ACL check for DNS names
+///
+/// This class is intended to perform a match between a domain name
+/// specified in an ACL and a given name. The primary usage of this class
+/// is an ACL match for TSIG keys, where an ACL would contain a list of
+/// acceptable key names and the \c match() method would compare the owner
+/// name of a TSIG record against the specified names.
+///
+/// This class could be used for other kinds of names such as the query name
+/// of normal DNS queries.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+template <typename Context>
+class NameCheck : public Check<Context> {
+public:
+ /// The constructor
+ ///
+ /// \exception std::bad_alloc Resource allocation fails in copying the
+ /// name
+ ///
+ /// \param name The domain name to be matched in \c matches().
+ NameCheck(const isc::dns::Name& name) : name_(name) {}
+
+ /// Destructor
+ virtual ~NameCheck() {}
+
+ /// The check method
+ ///
+ /// Matches the passed argument to the condition stored here. Different
+ /// specializations must be provided for different argument types, and the
+ /// program will fail to compile if a required specialisation is not
+ /// provided.
+ ///
+ /// \param context Information to be matched
+ virtual bool matches(const Context& context) const;
+
+ /// Returns the name specified on construction.
+ ///
+ /// This is mainly for testing purposes.
+ ///
+ /// \exception None
+ const isc::dns::Name& getName() const { return (name_); }
+
+private:
+ const isc::dns::Name name_;
+};
+
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __DNSNAME_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
index 6b024c9..f60b144 100644
--- a/src/lib/acl/loader.h
+++ b/src/lib/acl/loader.h
@@ -15,7 +15,8 @@
#ifndef ACL_LOADER_H
#define ACL_LOADER_H
-#include "acl.h"
+#include <exceptions/exceptions.h>
+#include <acl/acl.h>
#include <cc/data.h>
#include <boost/function.hpp>
#include <boost/shared_ptr.hpp>
@@ -81,7 +82,7 @@ public:
* or if it doesn't contain one of the accepted values.
*
* \param action The JSON representation of the action. It must be a string
- * and contain one of "ACCEPT", "REJECT" or "DENY".
+ * and contain one of "ACCEPT", "REJECT" or "DROP".
* \note We could define different names or add aliases if needed.
*/
BasicAction defaultActionLoader(data::ConstElementPtr action);
@@ -100,21 +101,21 @@ BasicAction defaultActionLoader(data::ConstElementPtr action);
*
* An ACL definition looks like this:
* \verbatim
- * [
- * {
- * "action": "ACCEPT",
- * "match-type": <parameter>
- * },
- * {
- * "action": "REJECT",
- * "match-type": <parameter>
- * "another-match-type": [<parameter1>, <parameter2>]
-* },
-* {
-* "action": "DROP"
-* }
- * ]
- * \endverbatim
+ [
+ {
+ "action": "ACCEPT",
+ "match-type": <parameter>
+ },
+ {
+ "action": "REJECT",
+ "match-type": <parameter>,
+ "another-match-type": [<parameter1>, <parameter2>]
+ },
+ {
+ "action": "DROP"
+ }
+ ]
+ \endverbatim
*
* This is a list of elements. Each element must have an "action"
* entry/keyword. That one specifies which action is returned if this
@@ -297,16 +298,28 @@ public:
* \brief Load an ACL.
*
* This parses an ACL list, creates the checks and actions of each element
- * and returns it. It may throw LoaderError if it isn't a list or the
- * "action" key is missing in some element. Also, no exceptions from
- * loadCheck (therefore from whatever creator is used) and from the
- * actionLoader passed to constructor are not caught.
+ * and returns it.
+ *
+ * No exceptions from \c loadCheck (therefore from whatever creator is
+ * used) and from the actionLoader passed to constructor are caught.
+ *
+ * \exception InvalidParameter The given element is NULL (most likely a
+ * caller's bug)
+ * \exception LoaderError The given element isn't a list or the
+ * "action" key is missing in some element
*
* \param description The JSON list of ACL.
+ *
+ * \return The newly created ACL object
*/
boost::shared_ptr<ACL<Context, Action> > load(const data::ConstElementPtr&
description) const
{
+ if (!description) {
+ isc_throw(isc::InvalidParameter,
+ "Null description is passed to ACL loader");
+ }
+
// We first check it's a list, so we can use the list reference
// (the list may be huge)
if (description->getType() != data::Element::list) {
@@ -460,3 +473,7 @@ private:
#include "logic_check.h"
#endif
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/logic_check.h b/src/lib/acl/logic_check.h
index 6e1c567..92441e8 100644
--- a/src/lib/acl/logic_check.h
+++ b/src/lib/acl/logic_check.h
@@ -200,6 +200,86 @@ private:
const std::string name_;
};
+/**
+ * \brief The NOT operator for ACLs.
+ *
+ * This simply returns the negation of whatever the subexpression returns.
+ */
+template<typename Context>
+class NotOperator : public CompoundCheck<Context> {
+public:
+ /**
+ * \brief Constructor
+ *
+ * \param expr The subexpression to be negated by this NOT.
+ */
+ NotOperator(const boost::shared_ptr<Check<Context> >& expr) :
+ expr_(expr)
+ { }
+ /**
+ * \brief The list of subexpressions
+ *
+ * \return The vector will contain a single value and it is the expression
+ * passed by constructor.
+ */
+ virtual typename CompoundCheck<Context>::Checks getSubexpressions() const {
+ typename CompoundCheck<Context>::Checks result;
+ result.push_back(expr_.get());
+ return (result);
+ }
+ /// \brief The matching function
+ virtual bool matches(const Context& context) const {
+ return (!expr_->matches(context));
+ }
+private:
+ /// \brief The subexpression
+ const boost::shared_ptr<Check<Context> > expr_;
+};
+
+template<typename Context, typename Action = BasicAction>
+class NotCreator : public Loader<Context, Action>::CheckCreator {
+public:
+ /**
+ * \brief Constructor
+ *
+ * \param name The name of the NOT operator to be loaded as.
+ */
+ NotCreator(const std::string& name) :
+ name_(name)
+ { }
+ /**
+ * \brief List of the names this loads
+ *
+ * \return Single-value vector containing the name passed to the
+ * constructor.
+ */
+ virtual std::vector<std::string> names() const {
+ std::vector<std::string> result;
+ result.push_back(name_);
+ return (result);
+ }
+ /// \brief Create the check.
+ virtual boost::shared_ptr<Check<Context> > create(const std::string&,
+ data::ConstElementPtr
+ definition,
+ const Loader<Context,
+ Action>& loader)
+ {
+ return (boost::shared_ptr<Check<Context> >(new NotOperator<Context>(
+ loader.loadCheck(definition))));
+ }
+ /**
+ * \brief Or-abbreviated form.
+ *
+ * This returns false. In theory, the NOT operator could be used with
+ * the abbreviated form, but it would be confusing. Such syntax is
+ * therefore explicitly forbidden.
+ */
+ virtual bool allowListAbbreviation() const { return (false); }
+public:
+ const std::string name_;
+};
+
}
}
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index 03b08bb..6369511 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -16,10 +16,12 @@ run_unittests_SOURCES += acl_test.cc
run_unittests_SOURCES += check_test.cc
run_unittests_SOURCES += dns_test.cc
run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += dnsname_check_unittest.cc
run_unittests_SOURCES += loader_test.cc
run_unittests_SOURCES += logcheck.h
run_unittests_SOURCES += creators.h
run_unittests_SOURCES += logic_check_test.cc
+run_unittests_SOURCES += sockaddr.h
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
@@ -29,6 +31,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
index e5e0f3a..b3ddbf4 100644
--- a/src/lib/acl/tests/dns_test.cc
+++ b/src/lib/acl/tests/dns_test.cc
@@ -12,24 +12,260 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdint.h>
+
+#include <algorithm>
+#include <vector>
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/tsigkey.h>
+#include <dns/tsigrecord.h>
+#include <dns/rdataclass.h>
+
+#include <cc/data.h>
#include <acl/dns.h>
+#include <acl/loader.h>
+#include <acl/check.h>
+#include <acl/ip_check.h>
+
+#include "sockaddr.h"
+
#include <gtest/gtest.h>
+using namespace std;
+using boost::scoped_ptr;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::data;
+using namespace isc::acl;
using namespace isc::acl::dns;
+using isc::acl::LoaderError;
namespace {
-// Tests that the getLoader actually returns something, returns the same every
-// time and the returned value can be used to anything. It is not much of a
-// test, but the getLoader is not much of a function.
-TEST(DNSACL, getLoader) {
- Loader* l(&getLoader());
+TEST(DNSACL, getRequestLoader) {
+ dns::RequestLoader* l(&getRequestLoader());
ASSERT_TRUE(l != NULL);
- EXPECT_EQ(l, &getLoader());
- EXPECT_NO_THROW(l->load(isc::data::Element::fromJSON(
- "[{\"action\": \"DROP\"}]")));
- // TODO Test that the things we should register by default, like IP based
- // check, are loaded.
+ EXPECT_EQ(l, &getRequestLoader());
+ EXPECT_NO_THROW(l->load(Element::fromJSON("[{\"action\": \"DROP\"}]")));
+
+    // Confirm it can load the ACL syntax acceptable to a default creator.
+ // Tests to see whether the loaded rules work correctly will be in
+ // other dedicated tests below.
+ EXPECT_NO_THROW(l->load(Element::fromJSON("[{\"action\": \"DROP\","
+ " \"from\": \"192.0.2.1\"}]")));
+}
+
+class RequestCheckCreatorTest : public ::testing::Test {
+protected:
+ dns::internal::RequestCheckCreator creator_;
+
+ typedef boost::shared_ptr<const dns::RequestCheck> ConstRequestCheckPtr;
+ ConstRequestCheckPtr check_;
+};
+
+TEST_F(RequestCheckCreatorTest, names) {
+ const vector<string> names = creator_.names();
+ EXPECT_EQ(2, names.size());
+ EXPECT_TRUE(find(names.begin(), names.end(), "from") != names.end());
+ EXPECT_TRUE(find(names.begin(), names.end(), "key") != names.end());
+}
+
+TEST_F(RequestCheckCreatorTest, allowListAbbreviation) {
+ EXPECT_FALSE(creator_.allowListAbbreviation());
+}
+
+// The following two tests check the creator for the form of
+// 'from: "IP prefix"'. We don't test many variants of prefixes, which
+// are done in the tests for IPCheck.
+TEST_F(RequestCheckCreatorTest, createIPv4Check) {
+ check_ = creator_.create("from", Element::fromJSON("\"192.0.2.1\""),
+ getRequestLoader());
+ const dns::internal::RequestIPCheck& ipcheck_ =
+ dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
+ EXPECT_EQ(AF_INET, ipcheck_.getFamily());
+ EXPECT_EQ(32, ipcheck_.getPrefixlen());
+ const vector<uint8_t> check_address(ipcheck_.getAddress());
+ ASSERT_EQ(4, check_address.size());
+ const uint8_t expected_address[] = { 192, 0, 2, 1 };
+ EXPECT_TRUE(equal(check_address.begin(), check_address.end(),
+ expected_address));
+}
+
+TEST_F(RequestCheckCreatorTest, createIPv6Check) {
+ check_ = creator_.create("from",
+ Element::fromJSON("\"2001:db8::5300/120\""),
+ getRequestLoader());
+ const dns::internal::RequestIPCheck& ipcheck =
+ dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
+ EXPECT_EQ(AF_INET6, ipcheck.getFamily());
+ EXPECT_EQ(120, ipcheck.getPrefixlen());
+ const vector<uint8_t> check_address(ipcheck.getAddress());
+ ASSERT_EQ(16, check_address.size());
+ const uint8_t expected_address[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x53, 0x00 };
+ EXPECT_TRUE(equal(check_address.begin(), check_address.end(),
+ expected_address));
+}
+
+TEST_F(RequestCheckCreatorTest, createTSIGKeyCheck) {
+ check_ = creator_.create("key", Element::fromJSON("\"key.example.com\""),
+ getRequestLoader());
+ const dns::internal::RequestKeyCheck& keycheck =
+ dynamic_cast<const dns::internal::RequestKeyCheck&>(*check_);
+ EXPECT_EQ(Name("key.example.com"), keycheck.getName());
+}
+
+TEST_F(RequestCheckCreatorTest, badCreate) {
+ // Invalid name
+ EXPECT_THROW(creator_.create("bad", Element::fromJSON("\"192.0.2.1\""),
+ getRequestLoader()), LoaderError);
+
+ // Invalid type of parameter
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("4"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("[]"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("1"),
+ getRequestLoader()),
+ isc::data::TypeError);
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("{}"),
+ getRequestLoader()),
+ isc::data::TypeError);
+
+ // Syntax error for IPCheck
+ EXPECT_THROW(creator_.create("from", Element::fromJSON("\"bad\""),
+ getRequestLoader()),
+ isc::InvalidParameter);
+
+ // Syntax error for Name (key) Check
+ EXPECT_THROW(creator_.create("key", Element::fromJSON("\"bad..name\""),
+ getRequestLoader()),
+ EmptyLabel);
+
+ // NULL pointer
+ EXPECT_THROW(creator_.create("from", ConstElementPtr(), getRequestLoader()),
+ LoaderError);
+}
+
+class RequestCheckTest : public ::testing::Test {
+protected:
+ typedef boost::shared_ptr<const dns::RequestCheck> ConstRequestCheckPtr;
+
+ // A helper shortcut to create a single IP check for the given prefix.
+ ConstRequestCheckPtr createIPCheck(const string& prefix) {
+ return (creator_.create("from", Element::fromJSON(
+ string("\"") + prefix + string("\"")),
+ getRequestLoader()));
+ }
+
+ // A helper shortcut to create a single Name (key) check for the given
+ // name.
+ ConstRequestCheckPtr createKeyCheck(const string& key_name) {
+ return (creator_.create("key", Element::fromJSON(
+ string("\"") + key_name + string("\"")),
+ getRequestLoader()));
+ }
+
+ // create a one time request context for a specific test. Note that
+ // getSockaddr() uses a static storage, so it cannot be called more than
+ // once in a single test.
+ const dns::RequestContext& getRequest4(const TSIGRecord* tsig = NULL) {
+ ipaddr.reset(new IPAddress(tests::getSockAddr("192.0.2.1")));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
+ return (*request);
+ }
+ const dns::RequestContext& getRequest6(const TSIGRecord* tsig = NULL) {
+ ipaddr.reset(new IPAddress(tests::getSockAddr("2001:db8::1")));
+ request.reset(new dns::RequestContext(*ipaddr, tsig));
+ return (*request);
+ }
+
+ // create a one time TSIG Record for a specific test. The only parameter
+ // of the record that matters is the key name; others are hardcoded with
+ // arbitrarily chosen values.
+ const TSIGRecord* getTSIGRecord(const string& key_name) {
+ tsig_rdata.reset(new any::TSIG(TSIGKey::HMACMD5_NAME(), 0, 0, 0, NULL,
+ 0, 0, 0, NULL));
+ tsig.reset(new TSIGRecord(Name(key_name), *tsig_rdata));
+ return (tsig.get());
+ }
+
+private:
+ scoped_ptr<IPAddress> ipaddr;
+ scoped_ptr<dns::RequestContext> request;
+ scoped_ptr<any::TSIG> tsig_rdata;
+ scoped_ptr<TSIGRecord> tsig;
+ dns::internal::RequestCheckCreator creator_;
+};
+
+TEST_F(RequestCheckTest, checkIPv4) {
+ // Exact match
+ EXPECT_TRUE(createIPCheck("192.0.2.1")->matches(getRequest4()));
+ // Exact match (negative)
+ EXPECT_FALSE(createIPCheck("192.0.2.53")->matches(getRequest4()));
+ // Prefix match
+ EXPECT_TRUE(createIPCheck("192.0.2.0/24")->matches(getRequest4()));
+ // Prefix match (negative)
+ EXPECT_FALSE(createIPCheck("192.0.1.0/24")->matches(getRequest4()));
+ // Address family mismatch (the first 4 bytes of the IPv6 address has the
+ // same binary representation as the client's IPv4 address, which
+ // shouldn't confuse the match logic)
+ EXPECT_FALSE(createIPCheck("c000:0201::")->matches(getRequest4()));
+}
+
+TEST_F(RequestCheckTest, checkIPv6) {
+ // The following are a set of tests of the same concept as checkIPv4
+ EXPECT_TRUE(createIPCheck("2001:db8::1")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("2001:db8::53")->matches(getRequest6()));
+ EXPECT_TRUE(createIPCheck("2001:db8::/64")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("2001:db8:1::/64")->matches(getRequest6()));
+ EXPECT_FALSE(createIPCheck("32.1.13.184")->matches(getRequest6()));
+}
+
+TEST_F(RequestCheckTest, checkTSIGKey) {
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest4(getTSIGRecord("badkey.example.com"))));
+
+ // Same for IPv6 (which shouldn't matter)
+ EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("key.example.com"))));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+ getRequest6(getTSIGRecord("badkey.example.com"))));
+
+ // by default the test request doesn't have a TSIG key, which shouldn't
+ // match any key checks.
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest4()));
+ EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest6()));
+}
+
+// The following tests test only the creators are registered, they are tested
+// elsewhere
+
+TEST(DNSACL, notLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"NOT\": {\"from\": \"192.0.2.1\"}}")));
+}
+
+TEST(DNSACL, allLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"ALL\": [{\"from\": \"192.0.2.1\"}]}")));
+}
+
+TEST(DNSACL, anyLoad) {
+ EXPECT_NO_THROW(getRequestLoader().loadCheck(isc::data::Element::fromJSON(
+ "{\"ANY\": [{\"from\": \"192.0.2.1\"}]}")));
}
}
diff --git a/src/lib/acl/tests/dnsname_check_unittest.cc b/src/lib/acl/tests/dnsname_check_unittest.cc
new file mode 100644
index 0000000..95b5314
--- /dev/null
+++ b/src/lib/acl/tests/dnsname_check_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+
+#include <acl/dnsname_check.h>
+
+using namespace isc::dns;
+using namespace isc::acl::dns;
+
+// Provide a specialization of the DNSNameCheck::matches() method.
+namespace isc {
+namespace acl {
+namespace dns {
+template <>
+bool NameCheck<Name>::matches(const Name& name) const {
+ return (name_ == name);
+}
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+namespace {
+TEST(DNSNameCheck, construct) {
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com")).getName());
+
+ // Construct the same check with an explicit trailing dot. Should result
+ // in the same result.
+ EXPECT_EQ(Name("example.com"),
+ NameCheck<Name>(Name("example.com.")).getName());
+}
+
+TEST(DNSNameCheck, match) {
+ NameCheck<Name> check(Name("example.com"));
+ EXPECT_TRUE(check.matches(Name("example.com")));
+ EXPECT_FALSE(check.matches(Name("example.org")));
+
+ // comparison is case insensitive
+ EXPECT_TRUE(check.matches(Name("EXAMPLE.COM")));
+
+ // This is an exact match, so super/sub domains don't match
+ EXPECT_FALSE(check.matches(Name("com")));
+ EXPECT_FALSE(check.matches(Name("www.example.com")));
+}
+} // Unnamed namespace
diff --git a/src/lib/acl/tests/ip_check_unittest.cc b/src/lib/acl/tests/ip_check_unittest.cc
index fb24978..8b8c498 100644
--- a/src/lib/acl/tests/ip_check_unittest.cc
+++ b/src/lib/acl/tests/ip_check_unittest.cc
@@ -14,12 +14,13 @@
#include <sys/types.h>
#include <sys/socket.h>
-#include <netdb.h>
#include <string.h>
#include <gtest/gtest.h>
#include <acl/ip_check.h>
+#include "sockaddr.h"
+
using namespace isc::acl;
using namespace isc::acl::internal;
using namespace std;
@@ -159,32 +160,8 @@ TEST(IPFunctionCheck, SplitIPAddress) {
EXPECT_THROW(splitIPAddress(" 1/ "), isc::InvalidParameter);
}
-const struct sockaddr&
-getSockAddr(const char* const addr) {
- struct addrinfo hints, *res;
- memset(&hints, 0, sizeof(hints));
- hints.ai_family = AF_UNSPEC;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_flags = AI_NUMERICHOST;
-
- if (getaddrinfo(addr, NULL, &hints, &res) == 0) {
- static struct sockaddr_storage ss;
- void* ss_ptr = &ss;
- memcpy(ss_ptr, res->ai_addr, res->ai_addrlen);
- freeaddrinfo(res);
- return (*static_cast<struct sockaddr*>(ss_ptr));
- }
-
- // We don't expect getaddrinfo to fail for our tests. But if that
- // ever happens we return a dummy value that would make subsequent test
- // fail.
- static struct sockaddr sa_dummy;
- sa_dummy.sa_family = AF_UNSPEC;
- return (sa_dummy);
-}
-
TEST(IPAddress, constructIPv4) {
- IPAddress ipaddr(getSockAddr("192.0.2.1"));
+ IPAddress ipaddr(tests::getSockAddr("192.0.2.1"));
const char expected_data[4] = { 192, 0, 2, 1 };
EXPECT_EQ(AF_INET, ipaddr.getFamily());
EXPECT_EQ(4, ipaddr.getLength());
@@ -192,7 +169,7 @@ TEST(IPAddress, constructIPv4) {
}
TEST(IPAddress, constructIPv6) {
- IPAddress ipaddr(getSockAddr("2001:db8:1234:abcd::53"));
+ IPAddress ipaddr(tests::getSockAddr("2001:db8:1234:abcd::53"));
const char expected_data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0x12, 0x34, 0xab,
0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x53 };
diff --git a/src/lib/acl/tests/loader_test.cc b/src/lib/acl/tests/loader_test.cc
index 4415081..1705c0a 100644
--- a/src/lib/acl/tests/loader_test.cc
+++ b/src/lib/acl/tests/loader_test.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include "creators.h"
+#include <exceptions/exceptions.h>
#include <acl/loader.h>
#include <string>
#include <gtest/gtest.h>
@@ -373,7 +374,10 @@ TEST_F(LoaderTest, ACLPropagate) {
Element::fromJSON(
"[{\"action\": \"ACCEPT\", \"throw\": 1}]")),
TestCreatorError);
+}
+TEST_F(LoaderTest, nullDescription) {
+ EXPECT_THROW(loader_.load(ConstElementPtr()), isc::InvalidParameter);
}
}
diff --git a/src/lib/acl/tests/logic_check_test.cc b/src/lib/acl/tests/logic_check_test.cc
index eec6d51..1c80277 100644
--- a/src/lib/acl/tests/logic_check_test.cc
+++ b/src/lib/acl/tests/logic_check_test.cc
@@ -93,6 +93,7 @@ public:
LogicCreator<AllOfSpec, Log>("ALL")));
loader_.registerCreator(CreatorPtr(new ThrowCreator));
loader_.registerCreator(CreatorPtr(new LogCreator));
+ loader_.registerCreator(CreatorPtr(new NotCreator<Log>("NOT")));
}
// To mark which parts of the check did run
Log log_;
@@ -242,4 +243,49 @@ TEST_F(LogicCreatorTest, nested) {
log_.checkFirst(2);
}
+void notTest(bool value) {
+ NotOperator<Log> notOp(shared_ptr<Check<Log> >(new ConstCheck(value, 0)));
+ Log log;
+ // It returns negated value
+ EXPECT_EQ(!value, notOp.matches(log));
+ // And runs the only one thing there
+ log.checkFirst(1);
+ // Check the getSubexpressions does sane things
+ ASSERT_EQ(1, notOp.getSubexpressions().size());
+ EXPECT_EQ(value, notOp.getSubexpressions()[0]->matches(log));
+}
+
+TEST(Not, trueValue) {
+ notTest(true);
+}
+
+TEST(Not, falseValue) {
+ notTest(false);
+}
+
+TEST_F(LogicCreatorTest, notInvalid) {
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": null}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": \"hello\"}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": true}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": 42}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": []}")),
+ LoaderError);
+ EXPECT_THROW(loader_.loadCheck(Element::fromJSON("{\"NOT\": [{"
+ "\"logcheck\": [0, true]"
+ "}]}")),
+ LoaderError);
+}
+
+TEST_F(LogicCreatorTest, notValid) {
+ shared_ptr<NotOperator<Log> > notOp(load<NotOperator<Log> >("{\"NOT\":"
+ " {\"logcheck\":"
+ " [0, true]}}"));
+ EXPECT_FALSE(notOp->matches(log_));
+ log_.checkFirst(1);
+}
+
}
diff --git a/src/lib/acl/tests/sockaddr.h b/src/lib/acl/tests/sockaddr.h
new file mode 100644
index 0000000..bd30451
--- /dev/null
+++ b/src/lib/acl/tests/sockaddr.h
@@ -0,0 +1,69 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __ACL_TEST_SOCKADDR_H
+#define __ACL_TEST_SOCKADDR_H 1
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <exceptions/exceptions.h>
+
+namespace isc {
+namespace acl {
+namespace tests {
+
+// This is a helper function that returns a sockaddr for the given textual
+// IP address. Note that "inline" is crucial because this function is defined
+// in a header file included in multiple .cc files. Without inline it would
+// produce an external linkage and cause troubles at link time.
+//
+// Note that this function uses static storage for the return value.
+// So if it's called more than once in a single context (e.g., in the same
+// EXPECT_xx()), it's unlikely to work as expected.
+inline const struct sockaddr&
+getSockAddr(const char* const addr) {
+ struct addrinfo hints, *res;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_NUMERICHOST;
+
+ if (getaddrinfo(addr, NULL, &hints, &res) == 0) {
+ static struct sockaddr_storage ss;
+ void* ss_ptr = &ss;
+ memcpy(ss_ptr, res->ai_addr, res->ai_addrlen);
+ freeaddrinfo(res);
+ return (*static_cast<struct sockaddr*>(ss_ptr));
+ }
+
+ // We don't expect getaddrinfo to fail for our tests. But if that
+ // ever happens we throw an exception to make sure the corresponding test
+ // fails (either due to a failure of *_NO_THROW or the uncaught exception).
+ isc_throw(Unexpected,
+ "failed to convert textual IP address to sockaddr for " <<
+ addr);
+}
+
+} // end of namespace "tests"
+} // end of namespace "acl"
+} // end of namespace "isc"
+
+#endif // __ACL_TEST_SOCKADDR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
index 3e11ede..feb75d4 100644
--- a/src/lib/asiodns/asiodns_messages.mes
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -26,13 +26,13 @@ enabled.
% ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
The asynchronous I/O code encountered an error when trying to open a socket
of the specified protocol in order to send a message to the target address.
-The number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
message.
% ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
The asynchronous I/O code encountered an error when trying to read data from
the specified address on the given protocol. The number of the system
-error that cause the problem is given in the message.
+error that caused the problem is given in the message.
% ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
An upstream fetch from the specified address timed out. This may happen for
@@ -41,9 +41,9 @@ or a problem on the network. The message will only appear if debug is
enabled.
% ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol. The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol. The number of the system
+error that caused the problem is given in the message.
% ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
An internal consistency check on the origin of a message from the
diff --git a/src/lib/asiodns/tests/run_unittests.cc b/src/lib/asiodns/tests/run_unittests.cc
index df77368..5cacdaf 100644
--- a/src/lib/asiodns/tests/run_unittests.cc
+++ b/src/lib/asiodns/tests/run_unittests.cc
@@ -15,14 +15,14 @@
#include <gtest/gtest.h>
#include <util/unittests/run_all.h>
-#include <log/logger_manager.h>
+#include <log/logger_support.h>
#include <dns/tests/unittest_util.h>
int
main(int argc, char* argv[])
{
::testing::InitGoogleTest(&argc, argv); // Initialize Google test
- isc::log::LoggerManager::init("unittest"); // Set a root logger name
+ isc::log::initLogger(); // Initialize logging
isc::UnitTestUtil::addDataPath(TEST_DATA_DIR); // Add location of test data
return (isc::util::unittests::run_all());
diff --git a/src/lib/asiolink/README b/src/lib/asiolink/README
index 66091b1..b9e38f9 100644
--- a/src/lib/asiolink/README
+++ b/src/lib/asiolink/README
@@ -20,3 +20,10 @@ Some of the classes defined here--for example, IOSocket, IOEndpoint,
and IOAddress--are to be used by BIND 10 modules as wrappers around
ASIO-specific classes.
+
+Logging
+-------
+
+At this point, nothing is logged by this low-level library. We may
+revisit that in the future, if we find suitable messages to log, but
+right now there are also no loggers initialized or called.
diff --git a/src/lib/asiolink/tests/interval_timer_unittest.cc b/src/lib/asiolink/tests/interval_timer_unittest.cc
index 8e8ef81..420cb90 100644
--- a/src/lib/asiolink/tests/interval_timer_unittest.cc
+++ b/src/lib/asiolink/tests/interval_timer_unittest.cc
@@ -28,7 +28,7 @@ const boost::posix_time::time_duration TIMER_MARGIN_MSEC =
using namespace isc::asiolink;
-// This fixture is for testing IntervalTimer. Some callback functors are
+// This fixture is for testing IntervalTimer. Some callback functors are
// registered as callback function of the timer to test if they are called
// or not.
class IntervalTimerTest : public ::testing::Test {
@@ -50,7 +50,9 @@ protected:
};
class TimerCallBackCounter : public std::unary_function<void, void> {
public:
- TimerCallBackCounter(IntervalTimerTest* test_obj) : test_obj_(test_obj) {
+ TimerCallBackCounter(IntervalTimerTest* test_obj) :
+ test_obj_(test_obj)
+ {
counter_ = 0;
}
void operator()() {
@@ -164,24 +166,20 @@ TEST_F(IntervalTimerTest, startIntervalTimer) {
itimer.setup(TimerCallBack(this), 100);
EXPECT_EQ(100, itimer.getInterval());
io_service_.run();
- // reaches here after timer expired
+ // Control reaches here after io_service_ was stopped by TimerCallBack.
+
// delta: difference between elapsed time and 100 milliseconds.
boost::posix_time::time_duration test_runtime =
boost::posix_time::microsec_clock::universal_time() - start;
- EXPECT_FALSE(test_runtime.is_negative()) <<
- "test duration " << test_runtime <<
+ EXPECT_FALSE(test_runtime.is_negative()) <<
+ "test duration " << test_runtime <<
" negative - clock skew?";
- boost::posix_time::time_duration delta =
- test_runtime - boost::posix_time::milliseconds(100);
- if (delta.is_negative()) {
- delta.invert_sign();
- }
- // expect TimerCallBack is called; timer_called_ is true
+ // Expect TimerCallBack is called; timer_called_ is true
EXPECT_TRUE(timer_called_);
- // expect interval is 100 milliseconds +/- TIMER_MARGIN_MSEC.
- EXPECT_TRUE(delta < TIMER_MARGIN_MSEC) <<
- "delta " << delta.total_milliseconds() << "msec " <<
- ">= " << TIMER_MARGIN_MSEC.total_milliseconds();
+ // Expect test_runtime is 100 milliseconds or longer.
+ EXPECT_TRUE(test_runtime > boost::posix_time::milliseconds(100)) <<
+ "test runtime " << test_runtime.total_milliseconds() <<
+ "msec " << ">= 100";
}
TEST_F(IntervalTimerTest, destructIntervalTimer) {
@@ -244,7 +242,7 @@ TEST_F(IntervalTimerTest, cancel) {
}
TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
- // Calling setup() multiple times updates call back function and interval.
+ // Call setup() multiple times to update call back function and interval.
//
// There are two timers:
// itimer (A)
@@ -266,7 +264,7 @@ TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
// 0 100 200 300 400 500 600 700 800 (ms)
// (A) i-------------+----C----s
// ^ ^stop io_service
- // |change call back function
+ // |change call back function and interval
// (B) i------------------+-------------------S
// ^(stop io_service on fail)
//
@@ -279,30 +277,11 @@ TEST_F(IntervalTimerTest, overwriteIntervalTimer) {
itimer.setup(TimerCallBackCounter(this), 300);
itimer_overwriter.setup(TimerCallBackOverwriter(this, itimer), 400);
io_service_.run();
- // reaches here after timer expired
- // if interval is updated, it takes
- // 400 milliseconds for TimerCallBackOverwriter
- // + 100 milliseconds for TimerCallBack (stop)
- // = 500 milliseconds.
- // otherwise (test fails), it takes
- // 400 milliseconds for TimerCallBackOverwriter
- // + 400 milliseconds for TimerCallBackOverwriter (stop)
- // = 800 milliseconds.
- // delta: difference between elapsed time and 400 + 100 milliseconds
- boost::posix_time::time_duration test_runtime =
- boost::posix_time::microsec_clock::universal_time() - start;
- EXPECT_FALSE(test_runtime.is_negative()) <<
- "test duration " << test_runtime <<
- " negative - clock skew?";
- boost::posix_time::time_duration delta =
- test_runtime - boost::posix_time::milliseconds(400 + 100);
- if (delta.is_negative()) {
- delta.invert_sign();
- }
- // expect callback function is updated: TimerCallBack is called
+ // Control reaches here after io_service_ was stopped by
+ // TimerCallBackCounter or TimerCallBackOverwriter.
+
+ // Expect callback function is updated: TimerCallBack is called
EXPECT_TRUE(timer_called_);
- // expect interval is updated
- EXPECT_TRUE(delta < TIMER_MARGIN_MSEC) <<
- "delta " << delta.total_milliseconds() << " msec " <<
- ">= " << TIMER_MARGIN_MSEC.total_milliseconds();
+ // Expect interval is updated: return value of getInterval() is updated
+ EXPECT_EQ(itimer.getInterval(), 100);
}
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index f0279d1..c7283ec 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -219,7 +219,7 @@ sockAddrMatch(const struct sockaddr& actual_sa,
res->ai_addr->sa_len = actual_sa.sa_len;
#endif
EXPECT_EQ(0, memcmp(res->ai_addr, &actual_sa, res->ai_addrlen));
- free(res);
+ freeaddrinfo(res);
}
TEST(IOEndpointTest, getSockAddr) {
diff --git a/src/lib/bench/tests/Makefile.am b/src/lib/bench/tests/Makefile.am
index 3ebdf29..3f8a678 100644
--- a/src/lib/bench/tests/Makefile.am
+++ b/src/lib/bench/tests/Makefile.am
@@ -16,6 +16,7 @@ run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(top_builddir)/src/lib/bench/libbench.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(GTEST_LDADD)
diff --git a/src/lib/cache/Makefile.am b/src/lib/cache/Makefile.am
index bfbe24a..9871a5e 100644
--- a/src/lib/cache/Makefile.am
+++ b/src/lib/cache/Makefile.am
@@ -31,5 +31,14 @@ libcache_la_SOURCES += cache_entry_key.h cache_entry_key.cc
libcache_la_SOURCES += rrset_copy.h rrset_copy.cc
libcache_la_SOURCES += local_zone_data.h local_zone_data.cc
libcache_la_SOURCES += message_utility.h message_utility.cc
+libcache_la_SOURCES += logger.h logger.cc
+nodist_libcache_la_SOURCES = cache_messages.cc cache_messages.h
-CLEANFILES = *.gcno *.gcda
+BUILT_SOURCES = cache_messages.cc cache_messages.h
+
+cache_messages.cc cache_messages.h: cache_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/cache/cache_messages.mes
+
+CLEANFILES = *.gcno *.gcda cache_messages.cc cache_messages.h
+
+EXTRA_DIST = cache_messages.mes
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
new file mode 100644
index 0000000..19102ae
--- /dev/null
+++ b/src/lib/cache/cache_messages.mes
@@ -0,0 +1,148 @@
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::cache
+
+% CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+
+% CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+
+% CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data
+Debug message. The requested data was not found in the local zone data.
+
+% CACHE_LOCALZONE_UPDATE updating local zone element at key %1
+Debug message issued when there's update to the local zone section of cache.
+
+% CACHE_MESSAGES_DEINIT deinitialized message cache
+Debug message. It is issued when the server deinitializes the message cache.
+
+% CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+
+% CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+
+% CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+
+% CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+
+% CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+
+% CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache
+Debug message. The message cache didn't find any entry for the given key.
+
+% CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, a new one
+is created.
+
+% CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+
+% CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+
+% CACHE_RESOLVER_INIT initializing resolver cache for class %1
+Debug message. The resolver cache is being created for this given class.
+
+% CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+
+% CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+
+% CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+
+% CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates as internally from resolver).
+
+% CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+
+% CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+
+% CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+
+% CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3
+Debug message. The resolver is updating a message in the cache.
+
+% CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3
+Debug message. The resolver is updating an RRset in the cache.
+
+% CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+
+% CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+
+% CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+
+% CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache
+Debug message. The resolver is trying to look up data in the RRset cache.
+
+% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+
+% CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+
+% CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+
+% CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache
+Debug message. The RRset cache is updating its data with the given RRset.
diff --git a/src/lib/cache/local_zone_data.cc b/src/lib/cache/local_zone_data.cc
index 61ce35a..13d1d75 100644
--- a/src/lib/cache/local_zone_data.cc
+++ b/src/lib/cache/local_zone_data.cc
@@ -16,6 +16,7 @@
#include "local_zone_data.h"
#include "cache_entry_key.h"
#include "rrset_copy.h"
+#include "logger.h"
using namespace std;
using namespace isc::dns;
@@ -33,8 +34,10 @@ LocalZoneData::lookup(const isc::dns::Name& name,
string key = genCacheEntryName(name, type);
RRsetMapIterator iter = rrsets_map_.find(key);
if (iter == rrsets_map_.end()) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_UNKNOWN).arg(key);
return (RRsetPtr());
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_FOUND).arg(key);
return (iter->second);
}
}
@@ -43,6 +46,7 @@ void
LocalZoneData::update(const isc::dns::RRset& rrset) {
//TODO Do we really need to recreate the rrset again?
string key = genCacheEntryName(rrset.getName(), rrset.getType());
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_LOCALZONE_UPDATE).arg(key);
RRset* rrset_copy = new RRset(rrset.getName(), rrset.getClass(),
rrset.getType(), rrset.getTTL());
diff --git a/src/lib/cache/logger.cc b/src/lib/cache/logger.cc
new file mode 100644
index 0000000..f4b0f25
--- /dev/null
+++ b/src/lib/cache/logger.cc
@@ -0,0 +1,23 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <cache/logger.h>
+
+namespace isc {
+namespace cache {
+
+isc::log::Logger logger("cache");
+
+}
+}
diff --git a/src/lib/cache/logger.h b/src/lib/cache/logger.h
new file mode 100644
index 0000000..8159ed4
--- /dev/null
+++ b/src/lib/cache/logger.h
@@ -0,0 +1,44 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATASRC_LOGGER_H
+#define __DATASRC_LOGGER_H
+
+#include <log/macros.h>
+#include <cache/cache_messages.h>
+
+/// \file logger.h
+/// \brief Cache library global logger
+///
+/// This holds the logger for the cache library. It is a private header
+/// and should not be included in any publicly used header, only in local
+/// cc files.
+
+namespace isc {
+namespace cache {
+
+/// \brief The logger for this library
+extern isc::log::Logger logger;
+
+enum {
+ /// \brief Trace basic operations
+ DBG_TRACE_BASIC = 10,
+ /// \brief Trace data operations
+ DBG_TRACE_DATA = 40,
+};
+
+}
+}
+
+#endif
diff --git a/src/lib/cache/message_cache.cc b/src/lib/cache/message_cache.cc
index 816ffe3..e141bb5 100644
--- a/src/lib/cache/message_cache.cc
+++ b/src/lib/cache/message_cache.cc
@@ -1,6 +1,7 @@
// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
+//
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
@@ -20,6 +21,7 @@
#include "message_cache.h"
#include "message_utility.h"
#include "cache_entry_key.h"
+#include "logger.h"
namespace isc {
namespace cache {
@@ -39,11 +41,14 @@ MessageCache::MessageCache(const RRsetCachePtr& rrset_cache,
message_lru_((3 * cache_size),
new HashDeleter<MessageEntry>(message_table_))
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_MESSAGES_INIT).arg(cache_size).
+ arg(RRClass(message_class));
}
MessageCache::~MessageCache() {
// Destroy all the message entries in the cache.
message_lru_.clear();
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_MESSAGES_DEINIT);
}
bool
@@ -57,26 +62,38 @@ MessageCache::lookup(const isc::dns::Name& qname,
if(msg_entry) {
// Check whether the message entry has expired.
if (msg_entry->getExpireTime() > time(NULL)) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_FOUND).
+ arg(entry_name);
message_lru_.touch(msg_entry);
return (msg_entry->genMessage(time(NULL), response));
} else {
// message entry expires, remove it from hash table and lru list.
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_EXPIRED).
+ arg(entry_name);
message_table_.remove(entry_key);
message_lru_.remove(msg_entry);
return (false);
}
}
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UNKNOWN).arg(entry_name);
return (false);
}
bool
MessageCache::update(const Message& msg) {
if (!canMessageBeCached(msg)){
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UNCACHEABLE).
+ arg((*msg.beginQuestion())->getName()).
+ arg((*msg.beginQuestion())->getType()).
+ arg((*msg.beginQuestion())->getClass());
return (false);
}
QuestionIterator iter = msg.beginQuestion();
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_UPDATE).
+ arg((*iter)->getName()).arg((*iter)->getType()).
+ arg((*iter)->getClass());
std::string entry_name = genCacheEntryName((*iter)->getName(),
(*iter)->getType());
HashKey entry_key = HashKey(entry_name, RRClass(message_class_));
@@ -88,6 +105,9 @@ MessageCache::update(const Message& msg) {
// add the message entry, maybe there is one way to touch it once.
MessageEntryPtr old_msg_entry = message_table_.get(entry_key);
if (old_msg_entry) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_MESSAGES_REMOVE).
+ arg((*iter)->getName()).arg((*iter)->getType()).
+ arg((*iter)->getClass());
message_lru_.remove(old_msg_entry);
}
diff --git a/src/lib/cache/message_cache.h b/src/lib/cache/message_cache.h
index 979b814..44d7fd1 100644
--- a/src/lib/cache/message_cache.h
+++ b/src/lib/cache/message_cache.h
@@ -39,7 +39,7 @@ private:
MessageCache& operator=(const MessageCache& source);
public:
/// \param rrset_cache The cache that stores the RRsets that the
- /// message entry will points to
+ /// message entry will point to
/// \param cache_size The size of message cache.
/// \param message_class The class of the message cache
/// \param negative_soa_cache The cache that stores the SOA record
diff --git a/src/lib/cache/message_entry.cc b/src/lib/cache/message_entry.cc
index de4ea89..d9560a6 100644
--- a/src/lib/cache/message_entry.cc
+++ b/src/lib/cache/message_entry.cc
@@ -20,6 +20,7 @@
#include "message_entry.h"
#include "message_utility.h"
#include "rrset_cache.h"
+#include "logger.h"
using namespace isc::dns;
using namespace std;
@@ -64,7 +65,7 @@ static uint32_t MAX_UINT32 = numeric_limits<uint32_t>::max();
// tunable. Values of one to three hours have been found to work well
// and would make sensible a default. Values exceeding one day have
// been found to be problematic. (sec 5, RFC2308)
-// The default value is 3 hourse (10800 seconds)
+// The default value is 3 hours (10800 seconds)
// TODO:Give an option to let user configure
static uint32_t MAX_NEGATIVE_CACHE_TTL = 10800;
@@ -142,6 +143,8 @@ MessageEntry::genMessage(const time_t& time_now,
// has expired, if it is, return false.
vector<RRsetEntryPtr> rrset_entry_vec;
if (false == getRRsetEntries(rrset_entry_vec, time_now)) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_ENTRY_MISSING_RRSET).
+ arg(entry_name_);
return (false);
}
diff --git a/src/lib/cache/resolver_cache.cc b/src/lib/cache/resolver_cache.cc
index 6602f79..57935c0 100644
--- a/src/lib/cache/resolver_cache.cc
+++ b/src/lib/cache/resolver_cache.cc
@@ -17,6 +17,7 @@
#include "resolver_cache.h"
#include "dns/message.h"
#include "rrset_cache.h"
+#include "logger.h"
#include <string>
#include <algorithm>
@@ -29,6 +30,7 @@ namespace cache {
ResolverClassCache::ResolverClassCache(const RRClass& cache_class) :
cache_class_(cache_class)
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RESOLVER_INIT).arg(cache_class);
local_zone_data_ = LocalZoneDataPtr(new LocalZoneData(cache_class_.getCode()));
rrsets_cache_ = RRsetCachePtr(new RRsetCache(RRSET_CACHE_DEFAULT_SIZE,
cache_class_.getCode()));
@@ -45,6 +47,8 @@ ResolverClassCache::ResolverClassCache(const RRClass& cache_class) :
ResolverClassCache::ResolverClassCache(const CacheSizeInfo& cache_info) :
cache_class_(cache_info.cclass)
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RESOLVER_INIT_INFO).
+ arg(cache_class_);
uint16_t klass = cache_class_.getCode();
// TODO We should find one way to load local zone data.
local_zone_data_ = LocalZoneDataPtr(new LocalZoneData(klass));
@@ -69,8 +73,11 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype,
isc::dns::Message& response) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOOKUP_MSG).
+ arg(qname).arg(qtype);
// message response should have a question section already.
if (response.beginQuestion() == response.endQuestion()) {
+ LOG_ERROR(logger, CACHE_RESOLVER_NO_QUESTION).arg(qname).arg(qtype);
isc_throw(MessageNoQuestionSection, "Message has no question section");
}
@@ -79,6 +86,8 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
// answer section.
RRsetPtr rrset_ptr = local_zone_data_->lookup(qname, qtype);
if (rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOCAL_MSG).
+ arg(qname).arg(qtype);
response.addRRset(Message::SECTION_ANSWER, rrset_ptr);
return (true);
}
@@ -91,11 +100,15 @@ isc::dns::RRsetPtr
ResolverClassCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOOKUP_RRSET).
+ arg(qname).arg(qtype);
// Algorithm:
// 1. Search in local zone data first,
// 2. Then do search in rrsets_cache_.
RRsetPtr rrset_ptr = local_zone_data_->lookup(qname, qtype);
if (rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_LOCAL_RRSET).
+ arg(qname).arg(qtype);
return (rrset_ptr);
} else {
RRsetEntryPtr rrset_entry = rrsets_cache_->lookup(qname, qtype);
@@ -109,6 +122,10 @@ ResolverClassCache::lookup(const isc::dns::Name& qname,
bool
ResolverClassCache::update(const isc::dns::Message& msg) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UPDATE_MSG).
+ arg((*msg.beginQuestion())->getName()).
+ arg((*msg.beginQuestion())->getType()).
+ arg((*msg.beginQuestion())->getClass());
return (messages_cache_->update(msg));
}
@@ -130,6 +147,9 @@ ResolverClassCache::updateRRsetCache(const isc::dns::ConstRRsetPtr& rrset_ptr,
bool
ResolverClassCache::update(const isc::dns::ConstRRsetPtr& rrset_ptr) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UPDATE_RRSET).
+ arg(rrset_ptr->getName()).arg(rrset_ptr->getType()).
+ arg(rrset_ptr->getClass());
// First update local zone, then update rrset cache.
local_zone_data_->update((*rrset_ptr.get()));
updateRRsetCache(rrset_ptr, rrsets_cache_);
@@ -166,6 +186,8 @@ ResolverCache::lookup(const isc::dns::Name& qname,
if (cc) {
return (cc->lookup(qname, qtype, response));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UNKNOWN_CLASS_MSG).
+ arg(qclass);
return (false);
}
}
@@ -179,6 +201,8 @@ ResolverCache::lookup(const isc::dns::Name& qname,
if (cc) {
return (cc->lookup(qname, qtype));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_UNKNOWN_CLASS_RRSET).
+ arg(qclass);
return (RRsetPtr());
}
}
@@ -187,6 +211,8 @@ isc::dns::RRsetPtr
ResolverCache::lookupDeepestNS(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass) const
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RESOLVER_DEEPEST).arg(qname).
+ arg(qclass);
isc::dns::RRType qtype = RRType::NS();
ResolverClassCache* cc = getClassCache(qclass);
if (cc) {
@@ -213,6 +239,9 @@ ResolverCache::update(const isc::dns::Message& msg) {
if (cc) {
return (cc->update(msg));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA,
+ CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG).
+ arg((*msg.beginQuestion())->getClass());
return (false);
}
}
@@ -223,6 +252,9 @@ ResolverCache::update(const isc::dns::ConstRRsetPtr& rrset_ptr) {
if (cc) {
return (cc->update(rrset_ptr));
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA,
+ CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET).
+ arg(rrset_ptr->getClass());
return (false);
}
}
diff --git a/src/lib/cache/rrset_cache.cc b/src/lib/cache/rrset_cache.cc
index da19b6d..1a5fd48 100644
--- a/src/lib/cache/rrset_cache.cc
+++ b/src/lib/cache/rrset_cache.cc
@@ -14,8 +14,9 @@
#include <config.h>
-#include <string>
#include "rrset_cache.h"
+#include "logger.h"
+#include <string>
#include <nsas/nsas_entry_compare.h>
#include <nsas/hash_table.h>
#include <nsas/hash_deleter.h>
@@ -34,20 +35,28 @@ RRsetCache::RRsetCache(uint32_t cache_size,
rrset_lru_((3 * cache_size),
new HashDeleter<RRsetEntry>(rrset_table_))
{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CACHE_RRSET_INIT).arg(cache_size).
+ arg(RRClass(rrset_class));
}
RRsetEntryPtr
RRsetCache::lookup(const isc::dns::Name& qname,
const isc::dns::RRType& qtype)
{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_LOOKUP).arg(qname).
+ arg(qtype).arg(RRClass(class_));
const string entry_name = genCacheEntryName(qname, qtype);
- RRsetEntryPtr entry_ptr = rrset_table_.get(HashKey(entry_name, RRClass(class_)));
+
+ RRsetEntryPtr entry_ptr = rrset_table_.get(HashKey(entry_name,
+ RRClass(class_)));
if (entry_ptr) {
if (entry_ptr->getExpireTime() > time(NULL)) {
// Only touch the non-expired rrset entries
rrset_lru_.touch(entry_ptr);
return (entry_ptr);
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_EXPIRED).arg(qname).
+ arg(qtype).arg(RRClass(class_));
// the rrset entry has expired, so just remove it from
// hash table and lru list.
rrset_table_.remove(entry_ptr->hashKey());
@@ -55,19 +64,31 @@ RRsetCache::lookup(const isc::dns::Name& qname,
}
}
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_NOT_FOUND).arg(qname).
+ arg(qtype).arg(RRClass(class_));
return (RRsetEntryPtr());
}
RRsetEntryPtr
-RRsetCache::update(const isc::dns::RRset& rrset, const RRsetTrustLevel& level) {
+RRsetCache::update(const isc::dns::RRset& rrset,
+ const RRsetTrustLevel& level)
+{
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_UPDATE).arg(rrset.getName()).
+ arg(rrset.getType()).arg(rrset.getClass());
// TODO: If the RRset is an NS, we should update the NSAS as well
// lookup first
RRsetEntryPtr entry_ptr = lookup(rrset.getName(), rrset.getType());
if (entry_ptr) {
if (entry_ptr->getTrustLevel() > level) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_UNTRUSTED).
+ arg(rrset.getName()).arg(rrset.getType()).
+ arg(rrset.getClass());
+ // existing rrset entry is more authoritative, just return it
return (entry_ptr);
} else {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, CACHE_RRSET_REMOVE_OLD).
+ arg(rrset.getName()).arg(rrset.getType()).
+ arg(rrset.getClass());
// Remove the old rrset entry from the lru list.
rrset_lru_.remove(entry_ptr);
}
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am
index 39215d9..a215c56 100644
--- a/src/lib/cache/tests/Makefile.am
+++ b/src/lib/cache/tests/Makefile.am
@@ -53,8 +53,10 @@ run_unittests_LDADD += -lboost_thread
endif
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/cache/tests/run_unittests.cc b/src/lib/cache/tests/run_unittests.cc
index b75fc06..370bc69 100644
--- a/src/lib/cache/tests/run_unittests.cc
+++ b/src/lib/cache/tests/run_unittests.cc
@@ -19,11 +19,15 @@
#include <dns/tests/unittest_util.h>
+#include <log/logger_support.h>
+
int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
isc::UnitTestUtil::addDataPath(TEST_DATA_SRCDIR);
isc::UnitTestUtil::addDataPath(TEST_DATA_BUILDDIR);
+ isc::log::initLogger();
+
return (isc::util::unittests::run_all());
}
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 8c62ea1..8370cdd 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -53,11 +53,11 @@ Debug message, we're about to send a message over the command channel.
This happens when garbage comes over the command channel or some kind of
confusion happens in the program. The data received from the socket make no
sense if we interpret it as lengths of message. The first one is total length
-of message, the second length of the header. The header and it's length
-(2 bytes) is counted in the total length.
+of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
% CC_LENGTH_NOT_READY length not ready
-There should be data representing length of message on the socket, but it
+There should be data representing the length of message on the socket, but it
is not there.
% CC_NO_MESSAGE no message ready to be received yet
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index 932bef4..ffa5346 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -447,7 +447,9 @@ from_stringstream_map(std::istream &in, const std::string& file, int& line,
ElementPtr map = Element::createMap();
skip_chars(in, " \t\n", line, pos);
char c = in.peek();
- if (c == '}') {
+ if (c == EOF) {
+ throwJSONError(std::string("Unterminated map, <string> or } expected"), file, line, pos);
+ } else if (c == '}') {
// empty map, skip closing curly
c = in.get();
} else {
@@ -509,6 +511,8 @@ Element::nameToType(const std::string& type_name) {
return (Element::list);
} else if (type_name == "map") {
return (Element::map);
+ } else if (type_name == "named_set") {
+ return (Element::map);
} else if (type_name == "null") {
return (Element::null);
} else if (type_name == "any") {
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 97d5cf1..e0e24cf 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -119,7 +119,7 @@ private:
void
SessionImpl::establish(const char& socket_file) {
try {
- LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(socket_file);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
error_);
LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
diff --git a/src/lib/cc/tests/data_unittests.cc b/src/lib/cc/tests/data_unittests.cc
index 2536682..53d5ab8 100644
--- a/src/lib/cc/tests/data_unittests.cc
+++ b/src/lib/cc/tests/data_unittests.cc
@@ -396,9 +396,24 @@ TEST(Element, to_and_from_wire) {
EXPECT_EQ("1", Element::fromWire(ss, 1)->str());
// Some malformed JSON input
+ EXPECT_THROW(Element::fromJSON("{ "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\" "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": \"b\""), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": {"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": {}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": []"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\": [ }"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("{\":"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("]"), isc::data::JSONError);
EXPECT_THROW(Element::fromJSON("[ 1, 2, }"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ 1, 2, {}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ 1, 2, { ]"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ "), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{{}}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{[]}"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("{ \"a\", \"b\" }"), isc::data::JSONError);
+ EXPECT_THROW(Element::fromJSON("[ \"a\": \"b\" ]"), isc::data::JSONError);
}
ConstElementPtr
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index 6b094ec..ac85077 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -18,12 +18,15 @@
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
+#include <ctype.h>
-#include <iostream>
-#include <fstream>
-#include <sstream>
+#include <algorithm>
#include <cerrno>
+#include <fstream>
+#include <iostream>
#include <set>
+#include <sstream>
+#include <string>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
@@ -175,6 +178,36 @@ ConstElementPtr getValueOrDefault(ConstElementPtr config_part,
}
}
+// Prefix name with "b10-".
+//
+// In BIND 10, modules have names taken from the .spec file, which are typically
+// names starting with a capital letter (e.g. "Resolver", "Auth" etc.). The
+// names of the associated binaries are derived from the module names, being
+// prefixed "b10-" and having the first letter of the module name lower-cased
+// (e.g. "b10-resolver", "b10-auth"). (It is a required convention that there
+// be this relationship between the names.)
+//
+// Within the binaries the root loggers are named after the binaries themselves.
+// (The reason for this is that the name of the logger is included in the
+// message logged, so making it clear which message comes from which BIND 10
+// process.) As logging is configured using module names, the configuration code
+// has to match these with the corresponding logger names. This function
+// converts a module name to a root logger name by lowercasing the first letter
+// of the module name and prepending "b10-".
+//
+// \param instring String to convert. (This may be empty, in which case
+// "b10-" will be returned.)
+//
+// \return Converted string.
+std::string
+b10Prefix(const std::string& instring) {
+ std::string result = instring;
+ if (!result.empty()) {
+ result[0] = tolower(result[0]);
+ }
+ return (std::string("b10-") + result);
+}
+
// Reads an output_option subelement of a logger configuration,
// and sets the values therein to the given OutputOption struct,
// or default values if they are not provided (from config_data).
@@ -215,6 +248,7 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
ConstElementPtr logger,
const ConfigData& config_data)
{
+ // Read name, adding prefix as required.
std::string lname = logger->get("name")->stringValue();
ConstElementPtr severity_el = getValueOrDefault(logger,
@@ -247,6 +281,27 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
specs.push_back(logger_spec);
}
+// Copies the map for a logger, changing the name of the logger in the process.
+// This is used because the map being copied is "const", so in order to
+// change the name we need to create a new one.
+//
+// \param cur_logger Logger being copied.
+// \param new_name New value of the "name" element at the top level.
+//
+// \return Pointer to the map with the updated element.
+ConstElementPtr
+copyLogger(ConstElementPtr& cur_logger, const std::string& new_name) {
+
+ // Since we'll only be updating one first-level element and subsequent
+ // use won't change the contents of the map, a shallow map copy is enough.
+ ElementPtr new_logger(Element::createMap());
+ new_logger->setValue(cur_logger->mapValue());
+ new_logger->set("name", Element::create(new_name));
+
+ return (new_logger);
+}
+
+
} // end anonymous namespace
@@ -259,38 +314,60 @@ getRelatedLoggers(ConstElementPtr loggers) {
ElementPtr result = isc::data::Element::createList();
BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+ // Need to add the b10- prefix to names read from the spec file.
const std::string cur_name = cur_logger->get("name")->stringValue();
- if (cur_name == root_name || cur_name.find(root_name + ".") == 0) {
- our_names.insert(cur_name);
- result->add(cur_logger);
+ const std::string mod_name = b10Prefix(cur_name);
+ if (mod_name == root_name || mod_name.find(root_name + ".") == 0) {
+
+ // Note this name so that we don't add a wildcard that matches it.
+ our_names.insert(mod_name);
+
+ // We want to store the logger with the modified name (i.e. with
+ // the b10- prefix). As we are dealing with const loggers, we
+ // store a modified copy of the data.
+ result->add(copyLogger(cur_logger, mod_name));
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS, CONFIG_LOG_EXPLICIT)
+ .arg(cur_name);
+
+ } else if (!cur_name.empty() && (cur_name[0] != '*')) {
+ // Not a wildcard logger and we are ignoring it.
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_IGNORE_EXPLICIT).arg(cur_name);
}
}
- // now find the * names
+ // Now find the wildcard names (the one that start with "*").
BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
std::string cur_name = cur_logger->get("name")->stringValue();
- // if name is '*', or starts with '*.', replace * with root
- // logger name
+ // If name is '*', or starts with '*.', replace * with root
+ // logger name.
if (cur_name == "*" || cur_name.length() > 1 &&
cur_name[0] == '*' && cur_name[1] == '.') {
- cur_name = root_name + cur_name.substr(1);
- // now add it to the result list, but only if a logger with
- // that name was not configured explicitely
- if (our_names.find(cur_name) == our_names.end()) {
- // we substitute the name here already, but as
- // we are dealing with consts, we copy the data
- ElementPtr new_logger(Element::createMap());
- // since we'll only be updating one first-level element,
- // and we return as const again, a shallow map copy is
- // enough
- new_logger->setValue(cur_logger->mapValue());
- new_logger->set("name", Element::create(cur_name));
- result->add(new_logger);
+ // Substitute the "*" with the root name
+ std::string mod_name = cur_name;
+ mod_name.replace(0, 1, root_name);
+
+ // Now add it to the result list, but only if a logger with
+ // that name was not configured explicitly.
+ if (our_names.find(mod_name) == our_names.end()) {
+
+ // We substitute the name here, but as we are dealing with
+ // consts, we need to copy the data.
+ result->add(copyLogger(cur_logger, mod_name));
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_WILD_MATCH).arg(cur_name);
+
+ } else if (!cur_name.empty() && (cur_name[0] == '*')) {
+ // Is a wildcard and we are ignoring it (because the wildcard
+ // expands to a specification that we already encountered when
+ // processing explicit names).
+ LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+ CONFIG_LOG_IGNORE_WILD).arg(cur_name);
}
}
}
- return result;
+ return (result);
}
void
@@ -318,7 +395,7 @@ ModuleSpec
ModuleCCSession::readModuleSpecification(const std::string& filename) {
std::ifstream file;
ModuleSpec module_spec;
-
+
// this file should be declared in a @something@ directive
file.open(filename.c_str());
if (!file) {
@@ -385,7 +462,7 @@ ModuleCCSession::ModuleCCSession(
LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
isc_throw(CCSessionInitError, answer->str());
}
-
+
setLocalConfig(Element::fromJSON("{}"));
// get any stored configuration from the manager
if (config_handler_) {
@@ -511,7 +588,7 @@ int
ModuleCCSession::checkCommand() {
ConstElementPtr cmd, routing, data;
if (session_.group_recvmsg(routing, data, true)) {
-
+
/* ignore result messages (in case we're out of sync, to prevent
* pingpongs */
if (data->getType() != Element::map || data->contains("result")) {
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index 7dc34ba..50bb65c 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -179,7 +179,7 @@ public:
* We'll need to develop a cleaner solution, and then remove this knob)
* @param handle_logging If true, the ModuleCCSession will automatically
* take care of logging configuration through the virtual Logging config
- * module.
+ * module. Defaults to true.
*/
ModuleCCSession(const std::string& spec_file_name,
isc::cc::AbstractSession& session,
@@ -189,7 +189,7 @@ public:
const std::string& command,
isc::data::ConstElementPtr args) = NULL,
bool start_immediately = true,
- bool handle_logging = false
+ bool handle_logging = true
);
/// Start receiving new commands and configuration changes asynchronously.
@@ -377,10 +377,10 @@ default_logconfig_handler(const std::string& module_name,
/// \brief Returns the loggers related to this module
///
/// This function does two things;
-/// - it drops the configuration parts for loggers for other modules
+/// - it drops the configuration parts for loggers for other modules.
/// - it replaces the '*' in the name of the loggers by the name of
/// this module, but *only* if the expanded name is not configured
-/// explicitely
+/// explicitly.
///
/// Examples: if this is the module b10-resolver,
/// For the config names ['*', 'b10-auth']
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 0063855..74e6a84 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -32,6 +32,14 @@ namespace config {
/// space.
extern isc::log::Logger config_logger; // isc::config::config_logger is the CONFIG logger
+/// \brief Debug Levels
+///
+/// Debug levels used in the configuration library
+enum {
+ DBG_CONFIG_PROCESS = 40 // Enumerate configuration elements as they
+ // ... are processed.
+};
+
} // namespace config
} // namespace isc
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
index 660ab9a..c439edd 100644
--- a/src/lib/config/config_messages.mes
+++ b/src/lib/config/config_messages.mes
@@ -37,6 +37,31 @@ manager is appended to the log error. The most likely cause is that
the module is of a different (command specification) version than the
running configuration manager.
+% CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program. The logging
+configuration for the program will be updated with the information.
+
+% CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger. As this does not match the logger specification for the
+program, it has been ignored.
+
+% CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1
+This is a debug message. When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry. The configuration is ignored.
+
+% CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1
+This is a debug message. When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+
% CONFIG_JSON_PARSE JSON parse error in %1: %2
There was an error parsing the JSON file. The given file does not appear
to be in valid JSON format. Please verify that the filename is correct
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 1621fe3..bebe695 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -67,10 +67,13 @@ check_config_item(ConstElementPtr spec) {
check_leaf_item(spec, "list_item_spec", Element::map, true);
check_config_item(spec->get("list_item_spec"));
}
- // todo: add stuff for type map
- if (Element::nameToType(spec->get("item_type")->stringValue()) == Element::map) {
+
+ if (spec->get("item_type")->stringValue() == "map") {
check_leaf_item(spec, "map_item_spec", Element::list, true);
check_config_item_list(spec->get("map_item_spec"));
+ } else if (spec->get("item_type")->stringValue() == "named_set") {
+ check_leaf_item(spec, "named_set_item_spec", Element::map, true);
+ check_config_item(spec->get("named_set_item_spec"));
}
}
@@ -84,6 +87,61 @@ check_config_item_list(ConstElementPtr spec) {
}
}
+// checks whether the given element is a valid statistics specification
+// returns false if the specification is bad
+bool
+check_format(ConstElementPtr value, ConstElementPtr format_name) {
+ typedef std::map<std::string, std::string> format_types;
+ format_types time_formats;
+ // TODO: other format types should be added if necessary
+ time_formats.insert(
+ format_types::value_type("date-time", "%Y-%m-%dT%H:%M:%SZ") );
+ time_formats.insert(
+ format_types::value_type("date", "%Y-%m-%d") );
+ time_formats.insert(
+ format_types::value_type("time", "%H:%M:%S") );
+ BOOST_FOREACH (const format_types::value_type& f, time_formats) {
+ if (format_name->stringValue() == f.first) {
+ struct tm tm;
+ std::vector<char> buf(32);
+ memset(&tm, 0, sizeof(tm));
+ // reverse check
+ return (strptime(value->stringValue().c_str(),
+ f.second.c_str(), &tm) != NULL
+ && strftime(&buf[0], buf.size(),
+ f.second.c_str(), &tm) != 0
+ && strncmp(value->stringValue().c_str(),
+ &buf[0], buf.size()) == 0);
+ }
+ }
+ return (false);
+}
+
+void check_statistics_item_list(ConstElementPtr spec);
+
+void
+check_statistics_item_list(ConstElementPtr spec) {
+ if (spec->getType() != Element::list) {
+ throw ModuleSpecError("statistics is not a list of elements");
+ }
+ BOOST_FOREACH(ConstElementPtr item, spec->listValue()) {
+ check_config_item(item);
+ // additional checks for statistics
+ check_leaf_item(item, "item_title", Element::string, true);
+ check_leaf_item(item, "item_description", Element::string, true);
+ check_leaf_item(item, "item_format", Element::string, false);
+ // checks name of item_format and validation of item_default
+ if (item->contains("item_format")
+ && item->contains("item_default")) {
+ if(!check_format(item->get("item_default"),
+ item->get("item_format"))) {
+ throw ModuleSpecError(
+ "item_default not valid type of item_format");
+ }
+ }
+ }
+}
+
void
check_command(ConstElementPtr spec) {
check_leaf_item(spec, "command_name", Element::string, true);
@@ -113,6 +171,9 @@ check_data_specification(ConstElementPtr spec) {
if (spec->contains("commands")) {
check_command_list(spec->get("commands"));
}
+ if (spec->contains("statistics")) {
+ check_statistics_item_list(spec->get("statistics"));
+ }
}
// checks whether the given element is a valid module specification
@@ -162,6 +223,15 @@ ModuleSpec::getConfigSpec() const {
}
}
+ConstElementPtr
+ModuleSpec::getStatisticsSpec() const {
+ if (module_specification->contains("statistics")) {
+ return (module_specification->get("statistics"));
+ } else {
+ return (ElementPtr());
+ }
+}
+
const std::string
ModuleSpec::getModuleName() const {
return (module_specification->get("module_name")->stringValue());
@@ -183,6 +253,12 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full) const {
}
bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full) const {
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, ElementPtr()));
+}
+
+bool
ModuleSpec::validateCommand(const std::string& command,
ConstElementPtr args,
ElementPtr errors) const
@@ -220,6 +296,14 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full,
return (validateSpecList(spec, data, full, errors));
}
+bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full,
+ ElementPtr errors) const
+{
+ ConstElementPtr spec = module_specification->find("statistics");
+ return (validateSpecList(spec, data, full, errors));
+}
+
ModuleSpec
moduleSpecFromFile(const std::string& file_name, const bool check)
throw(JSONError, ModuleSpecError)
@@ -286,7 +370,8 @@ check_type(ConstElementPtr spec, ConstElementPtr element) {
return (cur_item_type == "list");
break;
case Element::map:
- return (cur_item_type == "map");
+ return (cur_item_type == "map" ||
+ cur_item_type == "named_set");
break;
}
return (false);
@@ -323,7 +408,27 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
}
}
if (data->getType() == Element::map) {
- if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+ // either a normal 'map' or a 'named set' (determined by which
+ // subspecification it has)
+ if (spec->contains("map_item_spec")) {
+ if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+ return (false);
+ }
+ } else {
+ typedef std::pair<std::string, ConstElementPtr> maptype;
+
+ BOOST_FOREACH(maptype m, data->mapValue()) {
+ if (!validateItem(spec->get("named_set_item_spec"), m.second, full, errors)) {
+ return (false);
+ }
+ }
+ }
+ }
+ if (spec->contains("item_format")) {
+ if (!check_format(data, spec->get("item_format"))) {
+ if (errors) {
+ errors->add(Element::create("Format mismatch"));
+ }
return (false);
}
}
diff --git a/src/lib/config/module_spec.h b/src/lib/config/module_spec.h
index ab6e273..ce3762f 100644
--- a/src/lib/config/module_spec.h
+++ b/src/lib/config/module_spec.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium.
+// Copyright (C) 2010, 2011 Internet Systems Consortium.
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -71,6 +71,12 @@ namespace isc { namespace config {
/// part of the specification
isc::data::ConstElementPtr getConfigSpec() const;
+ /// Returns the statistics part of the specification as an
+ /// ElementPtr
+ /// \return ElementPtr Shared pointer to the statistics
+ /// part of the specification
+ isc::data::ConstElementPtr getStatisticsSpec() const;
+
/// Returns the full module specification as an ElementPtr
/// \return ElementPtr Shared pointer to the specification
isc::data::ConstElementPtr getFullSpec() const {
@@ -95,6 +101,17 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data,
const bool full = false) const;
+ // returns true if the given element conforms to this data
+ // statistics specification
+ /// Validates the given statistics data for this specification.
+ /// \param data The base \c Element of the data to check
+ /// \param full If true, all non-optional statistics parameters
+ /// must be specified.
+ /// \return true if the data conforms to the specification,
+ /// false otherwise.
+ bool validateStatistics(isc::data::ConstElementPtr data,
+ const bool full = false) const;
+
/// Validates the arguments for the given command
///
/// This checks the command and argument against the
@@ -142,6 +159,10 @@ namespace isc { namespace config {
bool validateConfig(isc::data::ConstElementPtr data, const bool full,
isc::data::ElementPtr errors) const;
+ /// errors must be of type ListElement
+ bool validateStatistics(isc::data::ConstElementPtr data, const bool full,
+ isc::data::ElementPtr errors) const;
+
private:
bool validateItem(isc::data::ConstElementPtr spec,
isc::data::ConstElementPtr data,
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index e1a4f9d..793fa30 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -44,7 +44,9 @@ el(const std::string& str) {
class CCSessionTest : public ::testing::Test {
protected:
- CCSessionTest() : session(el("[]"), el("[]"), el("[]")) {
+ CCSessionTest() : session(el("[]"), el("[]"), el("[]")),
+ root_name(isc::log::getRootLoggerName())
+ {
// upon creation of a ModuleCCSession, the class
// sends its specification to the config manager.
// it expects an ok answer back, so everytime we
@@ -52,8 +54,11 @@ protected:
// ok answer.
session.getMessages()->add(createAnswer());
}
- ~CCSessionTest() {}
+ ~CCSessionTest() {
+ isc::log::setRootLoggerName(root_name);
+ }
FakeSession session;
+ const std::string root_name;
};
TEST_F(CCSessionTest, createAnswer) {
@@ -151,7 +156,8 @@ TEST_F(CCSessionTest, parseCommand) {
TEST_F(CCSessionTest, session1) {
EXPECT_FALSE(session.haveSubscription("Spec1", "*"));
- ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL,
+ true, false);
EXPECT_TRUE(session.haveSubscription("Spec1", "*"));
EXPECT_EQ(1, session.getMsgQueue()->size());
@@ -163,21 +169,22 @@ TEST_F(CCSessionTest, session1) {
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
- // without explicit argument, the session should not automatically
+ // with this argument, the session should not automatically
// subscribe to logging config
EXPECT_FALSE(session.haveSubscription("Logging", "*"));
}
TEST_F(CCSessionTest, session2) {
EXPECT_FALSE(session.haveSubscription("Spec2", "*"));
- ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL,
+ true, false);
EXPECT_TRUE(session.haveSubscription("Spec2", "*"));
EXPECT_EQ(1, session.getMsgQueue()->size());
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(0, session.getMsgQueue()->size());
@@ -217,14 +224,14 @@ TEST_F(CCSessionTest, session3) {
EXPECT_FALSE(session.haveSubscription("Spec2", "*"));
ModuleCCSession mccs(ccspecfile("spec2.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec2", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
ConstElementPtr msg;
std::string group, to;
msg = session.getFirstMessage(group, to);
- EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+ EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": { }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
EXPECT_EQ("ConfigManager", group);
EXPECT_EQ("*", to);
EXPECT_EQ(1, session.getMsgQueue()->size());
@@ -241,7 +248,7 @@ TEST_F(CCSessionTest, checkCommand) {
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
@@ -318,7 +325,7 @@ TEST_F(CCSessionTest, checkCommand2) {
session.getMessages()->add(createAnswer(0, el("{}")));
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
- my_command_handler);
+ my_command_handler, true, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
ConstElementPtr msg;
std::string group, to;
@@ -370,7 +377,8 @@ TEST_F(CCSessionTest, remoteConfig) {
std::string module_name;
int item1;
- ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL, false);
+ ModuleCCSession mccs(ccspecfile("spec1.spec"), session, NULL, NULL,
+ false, false);
EXPECT_TRUE(session.haveSubscription("Spec1", "*"));
// first simply connect, with no config values, and see we get
@@ -526,7 +534,7 @@ TEST_F(CCSessionTest, ignoreRemoteConfigCommands) {
EXPECT_FALSE(session.haveSubscription("Spec29", "*"));
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, my_config_handler,
- my_command_handler, false);
+ my_command_handler, false, false);
EXPECT_TRUE(session.haveSubscription("Spec29", "*"));
EXPECT_EQ(2, session.getMsgQueue()->size());
@@ -578,14 +586,15 @@ TEST_F(CCSessionTest, initializationFail) {
// Test it throws when we try to start it twice (once from the constructor)
TEST_F(CCSessionTest, doubleStartImplicit) {
- ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
+ true, false);
EXPECT_THROW(mccs.start(), CCSessionError);
}
// The same, but both starts are explicit
TEST_F(CCSessionTest, doubleStartExplicit) {
ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
- false);
+ false, false);
mccs.start();
EXPECT_THROW(mccs.start(), CCSessionError);
}
@@ -593,7 +602,8 @@ TEST_F(CCSessionTest, doubleStartExplicit) {
// Test we can request synchronous receive before we start the session,
// and check there's the mechanism if we do it after
TEST_F(CCSessionTest, delayedStart) {
- ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL, false);
+ ModuleCCSession mccs(ccspecfile("spec2.spec"), session, NULL, NULL,
+ false, false);
session.getMessages()->add(createAnswer());
ConstElementPtr env, answer;
EXPECT_NO_THROW(session.group_recvmsg(env, answer, false, 3));
@@ -620,7 +630,7 @@ TEST_F(CCSessionTest, loggingStartBadSpec) {
// just give an empty config
session.getMessages()->add(createAnswer(0, el("{}")));
EXPECT_THROW(new ModuleCCSession(ccspecfile("spec2.spec"), session,
- NULL, NULL, true, true), ModuleSpecError);
+ NULL, NULL), ModuleSpecError);
EXPECT_FALSE(session.haveSubscription("Logging", "*"));
}
@@ -629,7 +639,8 @@ TEST_F(CCSessionTest, loggingStartBadSpec) {
// if we need to call addRemoteConfig().
// The correct cases are covered in remoteConfig test.
TEST_F(CCSessionTest, doubleStartWithAddRemoteConfig) {
- ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL);
+ ModuleCCSession mccs(ccspecfile("spec29.spec"), session, NULL, NULL,
+ true, false);
session.getMessages()->add(createAnswer(0, el("{}")));
EXPECT_THROW(mccs.addRemoteConfig(ccspecfile("spec2.spec")),
FakeSession::DoubleRead);
@@ -646,41 +657,44 @@ void doRelatedLoggersTest(const char* input, const char* expected) {
TEST(LogConfigTest, relatedLoggersTest) {
// make sure logger configs for 'other' programs are ignored,
// and that * is substituted correctly
- // The default root logger name is "bind10"
+ // We'll use a root logger name of "b10-test".
+ isc::log::setRootLoggerName("b10-test");
+
doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
"[]");
doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
"[]");
- doRelatedLoggersTest("[{ \"name\": \"bind10_other\" }]",
+ doRelatedLoggersTest("[{ \"name\": \"test_other\" }]",
"[]");
- doRelatedLoggersTest("[{ \"name\": \"bind10_other.somelib\" }]",
+ doRelatedLoggersTest("[{ \"name\": \"test_other.somelib\" }]",
"[]");
doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
- " { \"name\": \"bind10\" }]",
- "[ { \"name\": \"bind10\" } ]");
- doRelatedLoggersTest("[ { \"name\": \"bind10\" }]",
- "[ { \"name\": \"bind10\" } ]");
- doRelatedLoggersTest("[ { \"name\": \"bind10.somelib\" }]",
- "[ { \"name\": \"bind10.somelib\" } ]");
+ " { \"name\": \"test\" }]",
+ "[ { \"name\": \"b10-test\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"test\" }]",
+ "[ { \"name\": \"b10-test\" } ]");
+ doRelatedLoggersTest("[ { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test.somelib\" } ]");
doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
- " { \"name\": \"bind10.somelib\" }]",
- "[ { \"name\": \"bind10.somelib\" } ]");
+ " { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test.somelib\" } ]");
doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
- " { \"name\": \"bind10\" },"
- " { \"name\": \"bind10.somelib\" }]",
- "[ { \"name\": \"bind10\" },"
- " { \"name\": \"bind10.somelib\" } ]");
+ " { \"name\": \"test\" },"
+ " { \"name\": \"test.somelib\" }]",
+ "[ { \"name\": \"b10-test\" },"
+ " { \"name\": \"b10-test.somelib\" } ]");
doRelatedLoggersTest("[ { \"name\": \"*\" }]",
- "[ { \"name\": \"bind10\" } ]");
+ "[ { \"name\": \"b10-test\" } ]");
doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
- "[ { \"name\": \"bind10.somelib\" } ]");
+ "[ { \"name\": \"b10-test.somelib\" } ]");
doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
- " { \"name\": \"bind10\", \"severity\": \"WARN\"}]",
- "[ { \"name\": \"bind10\", \"severity\": \"WARN\"} ]");
+ " { \"name\": \"test\", \"severity\": \"WARN\"}]",
+ "[ { \"name\": \"b10-test\", \"severity\": \"WARN\"} ]");
doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
" { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
- "[ { \"name\": \"bind10\", \"severity\": \"DEBUG\"} ]");
-
+ "[ { \"name\": \"b10-test\", \"severity\": \"DEBUG\"} ]");
+ doRelatedLoggersTest("[ { \"name\": \"b10-test\" }]",
+ "[]");
// make sure 'bad' things like '*foo.x' or '*lib' are ignored
// (cfgmgr should have already caught it in the logconfig plugin
// check, and is responsible for reporting the error)
@@ -690,8 +704,8 @@ TEST(LogConfigTest, relatedLoggersTest) {
"[ ]");
doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
" { \"name\": \"*foo.lib\" },"
- " { \"name\": \"bind10\" } ]",
- "[ { \"name\": \"bind10\" } ]");
+ " { \"name\": \"test\" } ]",
+ "[ { \"name\": \"b10-test\" } ]");
}
}
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index 1b43350..b2ca7b4 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2009 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2009, 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,8 @@
#include <fstream>
+#include <boost/foreach.hpp>
+
#include <config/tests/data_def_unittests_config.h>
using namespace isc::data;
@@ -57,6 +59,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
dd = moduleSpecFromFile(specfile("spec2.spec"));
EXPECT_EQ("[ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [ ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ]", dd.getCommandsSpec()->str());
+ EXPECT_EQ("[ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ]", dd.getStatisticsSpec()->str());
EXPECT_EQ("Spec2", dd.getModuleName());
EXPECT_EQ("", dd.getModuleDescription());
@@ -64,6 +67,11 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ("Spec25", dd.getModuleName());
EXPECT_EQ("Just an empty module", dd.getModuleDescription());
EXPECT_THROW(moduleSpecFromFile(specfile("spec26.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec34.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec35.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec36.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec37.spec")), ModuleSpecError);
+ EXPECT_THROW(moduleSpecFromFile(specfile("spec38.spec")), ModuleSpecError);
std::ifstream file;
file.open(specfile("spec1.spec").c_str());
@@ -71,6 +79,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
EXPECT_EQ(dd.getFullSpec()->get("module_name")
->stringValue(), "Spec1");
EXPECT_TRUE(isNull(dd.getCommandsSpec()));
+ EXPECT_TRUE(isNull(dd.getStatisticsSpec()));
std::ifstream file2;
file2.open(specfile("spec8.spec").c_str());
@@ -114,6 +123,12 @@ TEST(ModuleSpec, SpecfileConfigData) {
"commands is not a list of elements");
}
+TEST(ModuleSpec, SpecfileStatistics) {
+ moduleSpecError("spec36.spec", "item_default not valid type of item_format");
+ moduleSpecError("spec37.spec", "statistics is not a list of elements");
+ moduleSpecError("spec38.spec", "item_default not valid type of item_format");
+}
+
TEST(ModuleSpec, SpecfileCommands) {
moduleSpecError("spec17.spec",
"command_name missing in { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\" }");
@@ -137,6 +152,17 @@ dataTest(const ModuleSpec& dd, const std::string& data_file_name) {
}
bool
+statisticsTest(const ModuleSpec& dd, const std::string& data_file_name) {
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data));
+}
+
+bool
dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
ElementPtr errors)
{
@@ -149,6 +175,19 @@ dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
return (dd.validateConfig(data, true, errors));
}
+bool
+statisticsTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
+ ElementPtr errors)
+{
+ std::ifstream data_file;
+
+ data_file.open(specfile(data_file_name).c_str());
+ ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+ data_file.close();
+
+ return (dd.validateStatistics(data, true, errors));
+}
+
TEST(ModuleSpec, DataValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec22.spec"));
@@ -175,6 +214,17 @@ TEST(ModuleSpec, DataValidation) {
EXPECT_EQ("[ \"Unknown item value_does_not_exist\" ]", errors->str());
}
+TEST(ModuleSpec, StatisticsValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec33.spec"));
+
+ EXPECT_TRUE(statisticsTest(dd, "data33_1.data"));
+ EXPECT_FALSE(statisticsTest(dd, "data33_2.data"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_FALSE(statisticsTestWithErrors(dd, "data33_2.data", errors));
+ EXPECT_EQ("[ \"Format mismatch\", \"Format mismatch\", \"Format mismatch\" ]", errors->str());
+}
+
TEST(ModuleSpec, CommandValidation) {
ModuleSpec dd = moduleSpecFromFile(specfile("spec2.spec"));
ConstElementPtr arg = Element::fromJSON("{}");
@@ -211,3 +261,118 @@ TEST(ModuleSpec, CommandValidation) {
EXPECT_EQ(errors->get(0)->stringValue(), "Type mismatch");
}
+
+TEST(ModuleSpec, NamedSetValidation) {
+ ModuleSpec dd = moduleSpecFromFile(specfile("spec32.spec"));
+
+ ElementPtr errors = Element::createList();
+ EXPECT_TRUE(dataTestWithErrors(dd, "data32_1.data", errors));
+ EXPECT_FALSE(dataTest(dd, "data32_2.data"));
+ EXPECT_FALSE(dataTest(dd, "data32_3.data"));
+}
+
+TEST(ModuleSpec, CheckFormat) {
+
+ const std::string json_begin = "{ \"module_spec\": { \"module_name\": \"Foo\", \"statistics\": [ { \"item_name\": \"dummy_time\", \"item_type\": \"string\", \"item_optional\": true, \"item_title\": \"Dummy Time\", \"item_description\": \"A dummy date time\"";
+ const std::string json_end = " } ] } }";
+ std::string item_default;
+ std::string item_format;
+ std::vector<std::string> specs;
+ ConstElementPtr el;
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_format);
+ item_default = "";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_format);
+
+ item_default = "\"item_default\": \"a\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"b\"";
+ specs.push_back("," + item_default);
+ item_default = "\"item_default\": \"c\"";
+ specs.push_back("," + item_default);
+
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_format);
+
+ specs.push_back("");
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_NO_THROW(ModuleSpec(el, true));
+ }
+
+ specs.clear();
+ item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-05-27\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"dummy\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"2011-13-99T99:99:99Z\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"2011-13-99\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"99:99:99Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"1\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ item_default = "\"item_default\": \"\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ // wrong date-time-type format not ending with "Z"
+ item_default = "\"item_default\": \"2011-05-27T19:42:57\",";
+ item_format = "\"item_format\": \"date-time\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong date-type format ending with "T"
+ item_default = "\"item_default\": \"2011-05-27T\",";
+ item_format = "\"item_format\": \"date\"";
+ specs.push_back("," + item_default + item_format);
+ // wrong time-type format ending with "Z"
+ item_default = "\"item_default\": \"19:42:57Z\",";
+ item_format = "\"item_format\": \"time\"";
+ specs.push_back("," + item_default + item_format);
+
+ BOOST_FOREACH(std::string s, specs) {
+ el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+ EXPECT_THROW(ModuleSpec(el, true), ModuleSpecError);
+ }
+}
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 57d1ed3..0d8b92e 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -22,6 +22,11 @@ EXTRA_DIST += data22_7.data
EXTRA_DIST += data22_8.data
EXTRA_DIST += data22_9.data
EXTRA_DIST += data22_10.data
+EXTRA_DIST += data32_1.data
+EXTRA_DIST += data32_2.data
+EXTRA_DIST += data32_3.data
+EXTRA_DIST += data33_1.data
+EXTRA_DIST += data33_2.data
EXTRA_DIST += spec1.spec
EXTRA_DIST += spec2.spec
EXTRA_DIST += spec3.spec
@@ -53,3 +58,10 @@ EXTRA_DIST += spec28.spec
EXTRA_DIST += spec29.spec
EXTRA_DIST += spec30.spec
EXTRA_DIST += spec31.spec
+EXTRA_DIST += spec32.spec
+EXTRA_DIST += spec33.spec
+EXTRA_DIST += spec34.spec
+EXTRA_DIST += spec35.spec
+EXTRA_DIST += spec36.spec
+EXTRA_DIST += spec37.spec
+EXTRA_DIST += spec38.spec
diff --git a/src/lib/config/tests/testdata/data32_1.data b/src/lib/config/tests/testdata/data32_1.data
new file mode 100644
index 0000000..5695b52
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_1.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": 1, "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_2.data b/src/lib/config/tests/testdata/data32_2.data
new file mode 100644
index 0000000..d5b9765
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_2.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": { "foo": "wrongtype", "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_3.data b/src/lib/config/tests/testdata/data32_3.data
new file mode 100644
index 0000000..85f32fe
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_3.data
@@ -0,0 +1,3 @@
+{
+ "named_set_item": []
+}
diff --git a/src/lib/config/tests/testdata/data33_1.data b/src/lib/config/tests/testdata/data33_1.data
new file mode 100644
index 0000000..429852c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_1.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "2011-05-27T19:42:57Z",
+ "dummy_date": "2011-05-27",
+ "dummy_time": "19:42:57"
+}
diff --git a/src/lib/config/tests/testdata/data33_2.data b/src/lib/config/tests/testdata/data33_2.data
new file mode 100644
index 0000000..eb0615c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_2.data
@@ -0,0 +1,7 @@
+{
+ "dummy_str": "Dummy String",
+ "dummy_int": 118,
+ "dummy_datetime": "xxxx",
+ "dummy_date": "xxxx",
+ "dummy_time": "xxxx"
+}
diff --git a/src/lib/config/tests/testdata/spec2.spec b/src/lib/config/tests/testdata/spec2.spec
index 59b8ebc..4352422 100644
--- a/src/lib/config/tests/testdata/spec2.spec
+++ b/src/lib/config/tests/testdata/spec2.spec
@@ -66,6 +66,17 @@
"command_description": "Shut down BIND 10",
"command_args": []
}
+ ],
+ "statistics": [
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy date time",
+ "item_format": "date-time"
+ }
]
}
}
diff --git a/src/lib/config/tests/testdata/spec32.spec b/src/lib/config/tests/testdata/spec32.spec
new file mode 100644
index 0000000..68e774e
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec32.spec
@@ -0,0 +1,19 @@
+{
+ "module_spec": {
+ "module_name": "Spec32",
+ "config_data": [
+ { "item_name": "named_set_item",
+ "item_type": "named_set",
+ "item_optional": false,
+ "item_default": { "a": 1, "b": 2 },
+ "named_set_item_spec": {
+ "item_name": "named_set_element",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 3
+ }
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec33.spec b/src/lib/config/tests/testdata/spec33.spec
new file mode 100644
index 0000000..3002488
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec33.spec
@@ -0,0 +1,50 @@
+{
+ "module_spec": {
+ "module_name": "Spec33",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string"
+ },
+ {
+ "item_name": "dummy_int",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Dummy Integer",
+ "item_description": "A dummy integer"
+ },
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ },
+ {
+ "item_name": "dummy_date",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01",
+ "item_title": "Dummy Date",
+ "item_description": "A dummy date",
+ "item_format": "date"
+ },
+ {
+ "item_name": "dummy_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "00:00:00",
+ "item_title": "Dummy Time",
+ "item_description": "A dummy time",
+ "item_format": "time"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec34.spec b/src/lib/config/tests/testdata/spec34.spec
new file mode 100644
index 0000000..dd1f3ca
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec34.spec
@@ -0,0 +1,14 @@
+{
+ "module_spec": {
+ "module_name": "Spec34",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_description": "A dummy string"
+ }
+ ]
+ }
+}
diff --git a/src/lib/config/tests/testdata/spec35.spec b/src/lib/config/tests/testdata/spec35.spec
new file mode 100644
index 0000000..86aaf14
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec35.spec
@@ -0,0 +1,15 @@
+{
+ "module_spec": {
+ "module_name": "Spec35",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec36.spec b/src/lib/config/tests/testdata/spec36.spec
new file mode 100644
index 0000000..fb9ce26
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec36.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec36",
+ "statistics": [
+ {
+ "item_name": "dummy_str",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "Dummy",
+ "item_title": "Dummy String",
+ "item_description": "A dummy string",
+ "item_format": "dummy"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec37.spec b/src/lib/config/tests/testdata/spec37.spec
new file mode 100644
index 0000000..bc444d1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec37.spec
@@ -0,0 +1,7 @@
+{
+ "module_spec": {
+ "module_name": "Spec37",
+ "statistics": 8
+ }
+}
+
diff --git a/src/lib/config/tests/testdata/spec38.spec b/src/lib/config/tests/testdata/spec38.spec
new file mode 100644
index 0000000..1892e88
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec38.spec
@@ -0,0 +1,17 @@
+{
+ "module_spec": {
+ "module_name": "Spec38",
+ "statistics": [
+ {
+ "item_name": "dummy_datetime",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "11",
+ "item_title": "Dummy DateTime",
+ "item_description": "A dummy datetime",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 457d5b0..5e193d2 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -9,7 +9,7 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
-lib_LTLIBRARIES = libdatasrc.la
+lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
libdatasrc_la_SOURCES = data_source.h data_source.cc
libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -17,16 +17,26 @@ libdatasrc_la_SOURCES += query.h query.cc
libdatasrc_la_SOURCES += cache.h cache.cc
libdatasrc_la_SOURCES += rbtree.h
libdatasrc_la_SOURCES += zonetable.h zonetable.cc
-libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
libdatasrc_la_SOURCES += zone.h
libdatasrc_la_SOURCES += result.h
libdatasrc_la_SOURCES += logger.h logger.cc
+libdatasrc_la_SOURCES += client.h iterator.h
+libdatasrc_la_SOURCES += database.h database.cc
+#libdatasrc_la_SOURCES += sqlite3_accessor.h sqlite3_accessor.cc
+libdatasrc_la_SOURCES += factory.h factory.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
+sqlite3_ds_la_LDFLAGS = -module
+
+memory_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+memory_ds_la_LDFLAGS = -module
+
libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libdatasrc_la_LIBADD += $(SQLITE_LIBS)
BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
diff --git a/src/lib/datasrc/cache.cc b/src/lib/datasrc/cache.cc
index 9082a6b..d88e649 100644
--- a/src/lib/datasrc/cache.cc
+++ b/src/lib/datasrc/cache.cc
@@ -232,7 +232,8 @@ HotCacheImpl::insert(const CacheNodePtr node) {
if (iter != map_.end()) {
CacheNodePtr old = iter->second;
if (old && old->isValid()) {
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_OLD_FOUND);
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_CACHE_OLD_FOUND)
+ .arg(node->getNodeName());
remove(old);
}
}
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
new file mode 100644
index 0000000..40b7a3f
--- /dev/null
+++ b/src/lib/datasrc/client.h
@@ -0,0 +1,292 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_CLIENT_H
+#define __DATA_SOURCE_CLIENT_H 1
+
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <datasrc/zone.h>
+
+/// \file
+/// Datasource clients
+///
+/// The data source client API is specified in client.h, and provides the
+/// functionality to query and modify data in the data sources. There are
+/// multiple datasource implementations, and by subclassing DataSourceClient or
+/// DatabaseClient, more can be added.
+///
+/// All datasources are implemented as loadable modules, with a name of the
+/// form "<type>_ds.so". This has been chosen intentionally, to minimize
+/// confusion and potential mistakes.
+///
+/// In order to use a datasource client backend, the class
+/// DataSourceClientContainer is provided in factory.h; this will load the
+/// library, set up the instance, and clean everything up once it is destroyed.
+///
+/// Access to the actual instance is provided with the getInstance() method
+/// in DataSourceClientContainer
+///
+/// \note Depending on actual usage, we might consider making the container
+/// a transparent abstraction layer, so it can be used as a DataSourceClient
+/// directly. This has some other implications though so for now the only access
+/// provided is through getInstance()).
+///
+/// For datasource backends, we use a dynamically loaded library system (with
+/// dlopen()). This library must contain the following things;
+/// - A subclass of DataSourceClient or DatabaseClient (which itself is a
+/// subclass of DataSourceClient)
+/// - A creator function for an instance of that subclass, of the form:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+/// \endcode
+/// - A destructor for said instance, of the form:
+/// \code
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+///
+/// See the documentation for the \link DataSourceClient \endlink class for
+/// more information on implementing subclasses of it.
+///
+
+namespace isc {
+namespace datasrc {
+
+// The iterator.h is not included on purpose, most application won't need it
+class ZoneIterator;
+typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
+
+/// \brief The base class of data source clients.
+///
+/// This is an abstract base class that defines the common interface for
+/// various types of data source clients. A data source client is a top level
+/// access point to a data source, allowing various operations on the data
+/// source such as lookups, traversing or updates. The client class itself
+/// has limited focus and delegates the responsibility for these specific
+/// operations to other classes; in general methods of this class act as
+/// factories of these other classes.
+///
+/// See \link datasrc/client.h datasrc/client.h \endlink for more information
+/// on adding datasource implementations.
+///
+/// The following derived classes are currently (expected to be) provided:
+/// - \c InMemoryClient: A client of a conceptual data source that stores
+/// all necessary data in memory for faster lookups
+/// - \c DatabaseClient: A client that uses a real database backend (such as
+/// an SQL database). It would internally hold a connection to the underlying
+/// database system.
+///
+/// \note It is intentional that while the term these derived classes don't
+/// contain "DataSource" unlike their base class. It's also noteworthy
+/// that the naming of the base class is somewhat redundant because the
+/// namespace \c datasrc would indicate that it's related to a data source.
+/// The redundant naming comes from the observation that namespaces are
+/// often omitted with \c using directives, in which case "Client"
+/// would be too generic. On the other hand, concrete derived classes are
+/// generally not expected to be referenced directly from other modules and
+/// applications, so we'll give them more concise names such as InMemoryClient.
+///
+/// A single \c DataSourceClient object is expected to handle only a single
+/// RR class even if the underlying data source contains records for multiple
+/// RR classes. Likewise, (when we support views) a \c DataSourceClient
+/// object is expected to handle only a single view.
+///
+/// If the application uses multiple threads, each thread will need to
+/// create and use a separate DataSourceClient. This is because some
+/// database backend doesn't allow multiple threads to share the same
+/// connection to the database.
+///
+/// \note For a client using an in memory backend, this may result in
+/// having a multiple copies of the same data in memory, increasing the
+/// memory footprint substantially. Depending on how to support multiple
+/// CPU cores for concurrent lookups on the same single data source (which
+/// is not fully fixed yet, and for which multiple threads may be used),
+/// this design may have to be revisited.
+///
+/// This class (and therefore its derived classes) are not copyable.
+/// This is because the derived classes would generally contain attributes
+/// that are not easy to copy (such as a large size of in memory data or a
+/// network connection to a database server). In order to avoid a surprising
+/// disruption with a naive copy it's prohibited explicitly. For the expected
+/// usage of the client classes the restriction should be acceptable.
+///
+/// \todo This class is still not complete. It will need more factory methods,
+/// e.g. for (re)loading a zone.
+class DataSourceClient : boost::noncopyable {
+public:
+ /// \brief A helper structure to represent the search result of
+ /// \c find().
+ ///
+ /// This is a straightforward pair of the result code and a share pointer
+ /// to the found zone to represent the result of \c find().
+ /// We use this in order to avoid overloading the return value for both
+ /// the result code ("success" or "not found") and the found object,
+ /// i.e., avoid using \c NULL to mean "not found", etc.
+ ///
+ /// This is a simple value class with no internal state, so for
+ /// convenience we allow the applications to refer to the members
+ /// directly.
+ ///
+ /// See the description of \c find() for the semantics of the member
+ /// variables.
+ struct FindResult {
+ FindResult(result::Result param_code,
+ const ZoneFinderPtr param_zone_finder) :
+ code(param_code), zone_finder(param_zone_finder)
+ {}
+ const result::Result code;
+ const ZoneFinderPtr zone_finder;
+ };
+
+ ///
+ /// \name Constructors and Destructor.
+ ///
+protected:
+ /// Default constructor.
+ ///
+ /// This is intentionally defined as protected as this base class
+ /// should never be instantiated directly.
+ ///
+ /// The constructor of a concrete derived class may throw an exception.
+ /// This interface does not specify which exceptions can happen (at least
+ /// at this moment), and the caller should expect any type of exception
+ /// and react accordingly.
+ DataSourceClient() {}
+
+public:
+ /// The destructor.
+ virtual ~DataSourceClient() {}
+ //@}
+
+ /// Returns a \c ZoneFinder for a zone that best matches the given name.
+ ///
+ /// A concrete derived version of this method gets access to its backend
+ /// data source to search for a zone whose origin gives the longest match
+ /// against \c name. It returns the search result in the form of a
+ /// \c FindResult object as follows:
+ /// - \c code: The result code of the operation.
+ /// - \c result::SUCCESS: A zone that gives an exact match is found
+ /// - \c result::PARTIALMATCH: A zone whose origin is a
+ /// super domain of \c name is found (but there is no exact match)
+ /// - \c result::NOTFOUND: For all other cases.
+ /// - \c zone_finder: Pointer to a \c ZoneFinder object for the found zone
+ /// if one is found; otherwise \c NULL.
+ ///
+ /// A specific derived version of this method may throw an exception.
+ /// This interface does not specify which exceptions can happen (at least
+ /// at this moment), and the caller should expect any type of exception
+ /// and react accordingly.
+ ///
+ /// \param name A domain name for which the search is performed.
+ /// \return A \c FindResult object enclosing the search result (see above).
+ virtual FindResult findZone(const isc::dns::Name& name) const = 0;
+
+ /// \brief Returns an iterator to the given zone
+ ///
+ /// This allows for traversing the whole zone. The returned object can
+ /// provide the RRsets one by one.
+ ///
+ /// This throws DataSourceError when the zone does not exist in the
+ /// datasource.
+ ///
+ /// The default implementation throws isc::NotImplemented. This allows
+ /// for easy and fast deployment of minimal custom data sources, where
+ /// the user/implementator doesn't have to care about anything else but
+ /// the actual queries. Also, in some cases, it isn't possible to traverse
+ /// the zone from logic point of view (eg. dynamically generated zone
+ /// data).
+ ///
+ /// It is not fixed if a concrete implementation of this method can throw
+ /// anything else.
+ ///
+ /// \param name The name of zone apex to be traversed. It doesn't do
+ /// nearest match as findZone.
+ /// \return Pointer to the iterator.
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const {
+ // This is here to both document the parameter in doxygen (therefore it
+ // needs a name) and avoid unused parameter warning.
+ static_cast<void>(name);
+
+ isc_throw(isc::NotImplemented,
+ "Data source doesn't support iteration");
+ }
+
+ /// Return an updater to make updates to a specific zone.
+ ///
+ /// The RR class of the zone is the one that the client is expected to
+ /// handle (see the detailed description of this class).
+ ///
+ /// If the specified zone is not found via the client, a NULL pointer
+ /// will be returned; in other words a completely new zone cannot be
+ /// created using an updater. It must be created beforehand (even if
+ /// it's an empty placeholder) in a way specific to the underlying data
+ /// source.
+ ///
+ /// Conceptually, the updater will trigger a separate transaction for
+ /// subsequent updates to the zone within the context of the updater
+ /// (the actual implementation of the "transaction" may vary for the
+ /// specific underlying data source). Until \c commit() is performed
+ /// on the updater, the intermediate updates won't affect the results
+ /// of other methods (and the result of the object's methods created
+ /// by other factory methods). Likewise, if the updater is destructed
+ /// without performing \c commit(), the intermediate updates will be
+ /// effectively canceled and will never affect other methods.
+ ///
+ /// If the underlying data source allows concurrent updates, this method
+ /// can be called multiple times while the previously returned updater(s)
+ /// are still active. In this case each updater triggers a different
+ /// "transaction". Normally it would be for different zones for such a
+ /// case as handling multiple incoming AXFR streams concurrently, but
+ /// this interface does not even prohibit an attempt of getting more than
+ /// one updater for the same zone, as long as the underlying data source
+ /// allows such an operation (and any conflict resolution is left to the
+ /// specific derived class implementation).
+ ///
+ /// If \c replace is true, any existing RRs of the zone will be
+ /// deleted on successful completion of updates (after \c commit() on
+ /// the updater); if it's false, the existing RRs will be
+ /// intact unless explicitly deleted by \c deleteRRset() on the updater.
+ ///
+ /// A data source can be "read only" or can prohibit partial updates.
+ /// In such cases this method will result in an \c isc::NotImplemented
+ /// exception unconditionally or when \c replace is false).
+ ///
+ /// \note To avoid throwing the exception accidentally with a lazy
+ /// implementation, we still keep this method pure virtual without
+ /// an implementation. All derived classes must explicitly define this
+ /// method, even if it simply throws the NotImplemented exception.
+ ///
+ /// \exception NotImplemented The underlying data source does not support
+ /// updates.
+ /// \exception DataSourceError Internal error in the underlying data
+ /// source.
+ /// \exception std::bad_alloc Resource allocation failure.
+ ///
+ /// \param name The zone name to be updated
+ /// \param replace Whether to delete existing RRs before making updates
+ ///
+ /// \return A pointer to the updater; it will be NULL if the specified
+ /// zone isn't found.
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const = 0;
+};
+}
+}
+#endif // DATA_SOURCE_CLIENT_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/data_source.cc b/src/lib/datasrc/data_source.cc
index 4e1fcde..94dec89 100644
--- a/src/lib/datasrc/data_source.cc
+++ b/src/lib/datasrc/data_source.cc
@@ -903,7 +903,7 @@ tryWildcard(Query& q, QueryTaskPtr task, ZoneInfo& zoneinfo, bool& found) {
result = proveNX(q, task, zoneinfo, true);
if (result != DataSrc::SUCCESS) {
m.setRcode(Rcode::SERVFAIL());
- logger.error(DATASRC_QUERY_WILDCARD_PROVENX_FAIL).
+ logger.error(DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL).
arg(task->qname).arg(result);
return (DataSrc::ERROR);
}
@@ -945,7 +945,7 @@ tryWildcard(Query& q, QueryTaskPtr task, ZoneInfo& zoneinfo, bool& found) {
void
DataSrc::doQuery(Query& q) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_QUERY_PROCESS).arg(q.qname()).
- arg(q.qclass());
+ arg(q.qtype()).arg(q.qclass());
Message& m = q.message();
vector<RRsetPtr> additional;
@@ -1162,7 +1162,7 @@ DataSrc::doQuery(Query& q) {
result = proveNX(q, task, zoneinfo, false);
if (result != DataSrc::SUCCESS) {
m.setRcode(Rcode::SERVFAIL());
- logger.error(DATASRC_QUERY_PROVENX_FAIL).arg(task->qname);
+ logger.error(DATASRC_QUERY_PROVE_NX_FAIL).arg(task->qname);
return;
}
}
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index ff695da..a7a15a9 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -184,9 +184,9 @@ public:
void setClass(isc::dns::RRClass& c) { rrclass = c; }
void setClass(const isc::dns::RRClass& c) { rrclass = c; }
- Result init() { return (NOT_IMPLEMENTED); }
- Result init(isc::data::ConstElementPtr config);
- Result close() { return (NOT_IMPLEMENTED); }
+ virtual Result init() { return (NOT_IMPLEMENTED); }
+ virtual Result init(isc::data::ConstElementPtr config);
+ virtual Result close() { return (NOT_IMPLEMENTED); }
virtual Result findRRset(const isc::dns::Name& qname,
const isc::dns::RRClass& qclass,
@@ -351,7 +351,7 @@ public:
/// \brief Returns the best enclosing zone name found for the given
// name and RR class so far.
- ///
+ ///
/// \return A pointer to the zone apex \c Name, NULL if none found yet.
///
/// This method never throws an exception.
@@ -413,6 +413,6 @@ private:
#endif
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..e476297
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,960 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <vector>
+
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <datasrc/data_source.h>
+#include <datasrc/logger.h>
+
+#include <boost/foreach.hpp>
+
+using namespace isc::dns;
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace datasrc {
+
+DatabaseClient::DatabaseClient(RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor>
+ accessor) :
+ rrclass_(rrclass), accessor_(accessor)
+{
+ if (!accessor_) {
+ isc_throw(isc::InvalidParameter,
+ "No database provided to DatabaseClient");
+ }
+}
+
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+ std::pair<bool, int> zone(accessor_->getZone(name.toText()));
+ // Try exact first
+ if (zone.first) {
+ return (FindResult(result::SUCCESS,
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second, name))));
+ }
+ // Then super domains
+ // Start from 1, as 0 is covered above
+ for (size_t i(1); i < name.getLabelCount(); ++i) {
+ isc::dns::Name superdomain(name.split(i));
+ zone = accessor_->getZone(superdomain.toText());
+ if (zone.first) {
+ return (FindResult(result::PARTIALMATCH,
+ ZoneFinderPtr(new Finder(accessor_,
+ zone.second,
+ superdomain))));
+ }
+ }
+ // No, really nothing
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor> accessor,
+ int zone_id, const isc::dns::Name& origin) :
+ accessor_(accessor),
+ zone_id_(zone_id),
+ origin_(origin)
+{ }
+
+namespace {
+// Adds the given Rdata to the given RRset
+// If the rrset is an empty pointer, a new one is
+// created with the given name, class, type and ttl
+// The type is checked if the rrset exists, but the
+// name is not.
+//
+// Then adds the given rdata to the set
+//
+// Raises a DataSourceError if the type does not
+// match, or if the given rdata string does not
+// parse correctly for the given type and class
+//
+// The DatabaseAccessor is passed to print the
+// database name in the log message if the TTL is
+// modified
+void addOrCreate(isc::dns::RRsetPtr& rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& cls,
+ const isc::dns::RRType& type,
+ const isc::dns::RRTTL& ttl,
+ const std::string& rdata_str,
+ const DatabaseAccessor& db
+ )
+{
+ if (!rrset) {
+ rrset.reset(new isc::dns::RRset(name, cls, type, ttl));
+ } else {
+ // This is a check to make sure find() is not messing things up
+ assert(type == rrset->getType());
+ if (ttl != rrset->getTTL()) {
+ if (ttl < rrset->getTTL()) {
+ rrset->setTTL(ttl);
+ }
+ logger.warn(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+ .arg(db.getDBName()).arg(name).arg(cls)
+ .arg(type).arg(rrset->getTTL());
+ }
+ }
+ try {
+ rrset->addRdata(isc::dns::rdata::createRdata(type, cls, rdata_str));
+ } catch (const isc::dns::rdata::InvalidRdataText& ivrt) {
+ // at this point, rrset may have been initialised for no reason,
+ // and won't be used. But the caller would drop the shared_ptr
+ // on such an error anyway, so we don't care.
+ isc_throw(DataSourceError,
+ "bad rdata in database for " << name << " "
+ << type << ": " << ivrt.what());
+ }
+}
+
+// This class keeps a short-lived store of RRSIG records encountered
+// during a call to find(). If the backend happens to return signatures
+// before the actual data, we might not know which signatures we will need
+// So if they may be relevant, we store the in this class.
+//
+// (If this class seems useful in other places, we might want to move
+// it to util. That would also provide an opportunity to add unit tests)
+class RRsigStore {
+public:
+ // Adds the given signature Rdata to the store
+ // The signature rdata MUST be of the RRSIG rdata type
+ // (the caller must make sure of this).
+ // NOTE: if we move this class to a public namespace,
+ // we should add a type_covered argument, so as not
+ // to have to do this cast here.
+ void addSig(isc::dns::rdata::RdataPtr sig_rdata) {
+ const isc::dns::RRType& type_covered =
+ static_cast<isc::dns::rdata::generic::RRSIG*>(
+ sig_rdata.get())->typeCovered();
+ sigs[type_covered].push_back(sig_rdata);
+ }
+
+ // If the store contains signatures for the type of the given
+ // rrset, they are appended to it.
+ void appendSignatures(isc::dns::RRsetPtr& rrset) const {
+ std::map<isc::dns::RRType,
+ std::vector<isc::dns::rdata::RdataPtr> >::const_iterator
+ found = sigs.find(rrset->getType());
+ if (found != sigs.end()) {
+ BOOST_FOREACH(isc::dns::rdata::RdataPtr sig, found->second) {
+ rrset->addRRsig(sig);
+ }
+ }
+ }
+
+private:
+ std::map<isc::dns::RRType, std::vector<isc::dns::rdata::RdataPtr> > sigs;
+};
+}
+
+DatabaseClient::Finder::FoundRRsets
+DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
+ bool check_ns, const string* construct_name)
+{
+ RRsigStore sig_store;
+ bool records_found = false;
+ std::map<RRType, RRsetPtr> result;
+
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_));
+ // It must not return NULL, that's a bug of the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ if (construct_name == NULL) {
+ construct_name = &name;
+ }
+
+ const Name construct_name_object(*construct_name);
+
+ bool seen_cname(false);
+ bool seen_ds(false);
+ bool seen_other(false);
+ bool seen_ns(false);
+
+ while (context->getNext(columns)) {
+ // The domain is not empty
+ records_found = true;
+
+ try {
+ const RRType cur_type(columns[DatabaseAccessor::TYPE_COLUMN]);
+
+ if (cur_type == RRType::RRSIG()) {
+ // If we get signatures before we get the actual data, we
+ // can't know which ones to keep and which to drop...
+ // So we keep a separate store of any signature that may be
+ // relevant and add them to the final RRset when we are
+ // done.
+ // A possible optimization here is to not store them for
+ // types we are certain we don't need
+ sig_store.addSig(rdata::createRdata(cur_type, getClass(),
+ columns[DatabaseAccessor::RDATA_COLUMN]));
+ }
+
+ if (types.find(cur_type) != types.end()) {
+ // This type is requested, so put it into result
+ const RRTTL cur_ttl(columns[DatabaseAccessor::TTL_COLUMN]);
+ // Ths sigtype column was an optimization for finding the
+ // relevant RRSIG RRs for a lookup. Currently this column is
+ // not used in this revised datasource implementation. We
+ // should either start using it again, or remove it from use
+ // completely (i.e. also remove it from the schema and the
+ // backend implementation).
+ // Note that because we don't use it now, we also won't notice
+ // it if the value is wrong (i.e. if the sigtype column
+ // contains an rrtype that is different from the actual value
+ // of the 'type covered' field in the RRSIG Rdata).
+ //cur_sigtype(columns[SIGTYPE_COLUMN]);
+ addOrCreate(result[cur_type], construct_name_object,
+ getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *accessor_);
+ }
+
+ if (cur_type == RRType::CNAME()) {
+ seen_cname = true;
+ } else if (cur_type == RRType::NS()) {
+ seen_ns = true;
+ } else if (cur_type == RRType::DS()) {
+ seen_ds = true;
+ } else if (cur_type != RRType::RRSIG() &&
+ cur_type != RRType::NSEC3() &&
+ cur_type != RRType::NSEC()) {
+ // NSEC and RRSIG can coexist with anything, otherwise
+ // we've seen something that can't live together with potential
+ // CNAME or NS
+ //
+ // NSEC3 lives in separate namespace from everything, therefore
+ // we just ignore it here for these checks as well.
+ seen_other = true;
+ }
+ } catch (const InvalidRRType&) {
+ isc_throw(DataSourceError, "Invalid RRType in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TYPE_COLUMN]);
+ } catch (const InvalidRRTTL&) {
+ isc_throw(DataSourceError, "Invalid TTL in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ TTL_COLUMN]);
+ } catch (const rdata::InvalidRdataText&) {
+ isc_throw(DataSourceError, "Invalid rdata in database for " <<
+ name << ": " << columns[DatabaseAccessor::
+ RDATA_COLUMN]);
+ }
+ }
+ if (seen_cname && (seen_other || seen_ns || seen_ds)) {
+ isc_throw(DataSourceError, "CNAME shares domain " << name <<
+ " with something else");
+ }
+ if (check_ns && seen_ns && seen_other) {
+ isc_throw(DataSourceError, "NS shares domain " << name <<
+ " with something else");
+ }
+ // Add signatures to all found RRsets
+ for (std::map<RRType, RRsetPtr>::iterator i(result.begin());
+ i != result.end(); ++ i) {
+ sig_store.appendSignatures(i->second);
+ }
+
+ return (FoundRRsets(records_found, result));
+}
+
+bool
+DatabaseClient::Finder::hasSubdomains(const std::string& name) {
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getRecords(name, zone_id_, true));
+ // It must not return NULL, that's a bug of the implementation
+ if (!context) {
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
+ }
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ return (context->getNext(columns));
+}
+
+// Some manipulation with RRType sets
+namespace {
+
+// Bunch of functions to construct specific sets of RRTypes we will
+// ask from it.
+//
+// NOTE(review): the lazy "initialized" flag + static set pattern below
+// is not thread-safe on first call; confirm these are first invoked in
+// a single-threaded context (or before any concurrent lookups start).
+typedef std::set<RRType> WantedTypes;
+
+// Types requested when looking for an NSEC proof at a name.
+const WantedTypes&
+NSEC_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+// Types requested when probing superdomains for a delegation point.
+const WantedTypes&
+DELEGATION_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::DNAME());
+ result.insert(RRType::NS());
+ initialized = true;
+ }
+ return (result);
+}
+
+// Base set of types requested at the query name itself; the caller
+// adds the actually queried type on top of these.
+const WantedTypes&
+FINAL_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::CNAME());
+ result.insert(RRType::NS());
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+}
+
+// Find the NSEC RRset that covers (proves the nonexistence of) the
+// given name, by asking the accessor for the previous existing name
+// and fetching its NSEC. Returns a null RRsetPtr when no proof can be
+// provided (unsigned zone, or accessor doesn't support previous-name).
+RRsetPtr
+DatabaseClient::Finder::findNSECCover(const Name& name) {
+ try {
+ // Which one should contain the NSEC record?
+ const Name coverName(findPreviousName(name));
+ // Get the record and copy it out
+ const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
+ coverName != getOrigin());
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ return (nci->second);
+ } else {
+ // The previous doesn't contain NSEC.
+ // Badly signed zone or a bug?
+
+ // FIXME: Currently, if the zone is not signed, we could get
+ // here. In that case we can't really throw, but for now, we can't
+ // recognize it. So we don't throw at all, enable it once
+ // we have a is_signed flag or something.
+#if 0
+ isc_throw(DataSourceError, "No NSEC in " +
+ coverName.toText() + ", but it was "
+ "returned as previous - "
+ "accessor error? Badly signed zone?");
+#endif
+ }
+ }
+ catch (const isc::NotImplemented&) {
+ // Well, they want DNSSEC, but none is available from this accessor.
+ // So we don't provide anything.
+ LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+ arg(accessor_->getDBName()).arg(name);
+ }
+ // We didn't find it, return nothing
+ return (RRsetPtr());
+}
+
+// The full lookup algorithm over the database backend:
+//  1. walk superdomains from the origin down looking for a delegation
+//     (NS) or redirection (DNAME),
+//  2. otherwise look at the exact name (NS at the node, CNAME, the
+//     queried type),
+//  3. otherwise check for an empty non-terminal, then try wildcard
+//     matching label by label up to the last known existing domain,
+//  4. when FIND_DNSSEC is requested, attach NSEC proofs where needed.
+// The unused RRsetList* parameter is part of the ZoneFinder interface.
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList*,
+ const FindOptions options)
+{
+ // This variable is used to determine the difference between
+ // NXDOMAIN and NXRRSET
+ bool records_found = false;
+ bool glue_ok((options & FIND_GLUE_OK) != 0);
+ const bool dnssec_data((options & FIND_DNSSEC) != 0);
+ bool get_cover(false);
+ isc::dns::RRsetPtr result_rrset;
+ ZoneFinder::Result result_status = SUCCESS;
+ FoundRRsets found;
+ logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+ .arg(accessor_->getDBName()).arg(name).arg(type);
+ // In case we are in GLUE_OK mode and start matching wildcards,
+ // we can't do it under NS, so we store it here to check
+ isc::dns::RRsetPtr first_ns;
+
+ // First, do we have any kind of delegation (NS/DNAME) here?
+ const Name origin(getOrigin());
+ const size_t origin_label_count(origin.getLabelCount());
+ // Number of labels in the last known non-empty domain
+ size_t last_known(origin_label_count);
+ const size_t current_label_count(name.getLabelCount());
+ // This is how many labels we remove to get origin
+ size_t remove_labels(current_label_count - origin_label_count);
+
+ // Now go through all superdomains from origin down
+ for (int i(remove_labels); i > 0; --i) {
+ Name superdomain(name.split(i));
+ // Look if there's NS or DNAME (but ignore the NS in origin)
+ found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
+ i != remove_labels);
+ if (found.first) {
+ // It contains some RRs, so it exists.
+ last_known = superdomain.getLabelCount();
+
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator dni(found.second.find(RRType::DNAME()));
+ // In case we are in GLUE_OK mode, we want to store the
+ // highest encountered NS (but not apex)
+ if (glue_ok && !first_ns && i != remove_labels &&
+ nsi != found.second.end()) {
+ first_ns = nsi->second;
+ } else if (!glue_ok && i != remove_labels &&
+ nsi != found.second.end()) {
+ // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
+ // delegation in apex
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ // No need to go lower, found
+ break;
+ } else if (dni != found.second.end()) {
+ // Very similar with DNAME
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DNAME).
+ arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = dni->second;
+ result_status = DNAME;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "DNAME at " << superdomain <<
+ " has " << result_rrset->getRdataCount() <<
+ " rdata, 1 expected");
+ }
+ break;
+ }
+ }
+ }
+
+ if (!result_rrset) { // Only if we didn't find a redirect already
+ // Try getting the final result and extract it
+ // It is special if there's a CNAME or NS, DNAME is ignored here
+ // And we don't consider the NS in origin
+
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ found = getRRsets(name.toText(), final_types, name != origin);
+ records_found = found.first;
+
+ // NS records, CNAME record and Wanted Type records
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+ if (name != origin && !glue_ok && nsi != found.second.end()) {
+ // There's a delegation at the exact node.
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+ arg(accessor_->getDBName()).arg(name);
+ result_status = DELEGATION;
+ result_rrset = nsi->second;
+ } else if (type != isc::dns::RRType::CNAME() &&
+ cni != found.second.end()) {
+ // A CNAME here
+ result_status = CNAME;
+ result_rrset = cni->second;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ result_rrset->getRdataCount() <<
+ " rdata at " << name << ", expected 1");
+ }
+ } else if (wti != found.second.end()) {
+ // Just get the answer
+ result_rrset = wti->second;
+ } else if (!records_found) {
+ // Nothing lives here.
+ // But check if something lives below this
+ // domain and if so, pretend something is here as well.
+ if (hasSubdomains(name.toText())) {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+ arg(accessor_->getDBName()).arg(name);
+ records_found = true;
+ get_cover = dnssec_data;
+ } else {
+ // It's not empty non-terminal. So check for wildcards.
+ // We remove labels one by one and look for the wildcard there.
+ // Go up to first non-empty domain.
+
+ remove_labels = current_label_count - last_known;
+ for (size_t i(1); i <= remove_labels; ++ i) {
+ // Construct the name with *
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
+ // TODO What do we do about DNAME here?
+ // The types are the same as with original query
+ found = getRRsets(wildcard, final_types, true,
+ &construct_name);
+ if (found.first) {
+ if (first_ns) {
+ // In case we are under NS, we don't
+ // wildcard-match, but return delegation
+ result_rrset = first_ns;
+ result_status = DELEGATION;
+ records_found = true;
+ // We pretend to switch to non-glue_ok mode
+ glue_ok = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(first_ns->getName());
+ } else if (!hasSubdomains(name.split(i - 1).toText()))
+ {
+ // Nothing we added as part of the * can exist
+ // directly, as we go up only to first existing
+ // domain, but it could be empty non-terminal. In
+ // that case, we need to cancel the match.
+ records_found = true;
+ const FoundIterator
+ cni(found.second.find(RRType::CNAME()));
+ const FoundIterator
+ nsi(found.second.find(RRType::NS()));
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ const FoundIterator wti(found.second.find(type));
+ if (cni != found.second.end() &&
+ type != RRType::CNAME()) {
+ result_rrset = cni->second;
+ result_status = CNAME;
+ } else if (nsi != found.second.end()) {
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ } else if (wti != found.second.end()) {
+ result_rrset = wti->second;
+ result_status = WILDCARD;
+ } else {
+ // NXRRSET case in the wildcard
+ result_status = WILDCARD_NXRRSET;
+ if (dnssec_data &&
+ nci != found.second.end()) {
+ // User wants a proof the wildcard doesn't
+ // contain it
+ //
+ // However, we need to get the RRset in the
+ // name of the wildcard, not the constructed
+ // one, so we walk it again
+ found = getRRsets(wildcard, NSEC_TYPES(),
+ true);
+ result_rrset =
+ found.second.find(RRType::NSEC())->
+ second;
+ }
+ }
+
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ } else {
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name).arg(superdomain);
+ }
+ break;
+ } else if (hasSubdomains(wildcard)) {
+ // Empty non-terminal asterisk
+ records_found = true;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_WILDCARD_EMPTY).
+ arg(accessor_->getDBName()).arg(wildcard).
+ arg(name);
+ if (dnssec_data) {
+ result_rrset = findNSECCover(Name(wildcard));
+ if (result_rrset) {
+ result_status = WILDCARD_NXRRSET;
+ }
+ }
+ break;
+ }
+ }
+ // This is the NXDOMAIN case (nothing found anywhere). If
+ // they want DNSSEC data, try getting the NSEC record
+ if (dnssec_data && !records_found) {
+ get_cover = true;
+ }
+ }
+ } else if (dnssec_data) {
+ // This is the "usual" NXRRSET case
+ // So in case they want DNSSEC, provide the NSEC
+ // (which should be available already here)
+ result_status = NXRRSET;
+ const FoundIterator nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ result_rrset = nci->second;
+ }
+ }
+ }
+
+ if (!result_rrset) {
+ if (result_status == SUCCESS) {
+ // Should we look for NSEC covering the name?
+ if (get_cover) {
+ result_rrset = findNSECCover(name);
+ if (result_rrset) {
+ result_status = NXDOMAIN;
+ }
+ }
+ // Something is not here and we didn't decide yet what
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
+ }
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_RRSET)
+ .arg(accessor_->getDBName()).arg(*result_rrset);
+ }
+ return (FindResult(result_status, result_rrset));
+}
+
+// Ask the accessor for the name preceding 'name' in the zone's
+// canonical (DNSSEC) order. The accessor works on reversed textual
+// names, hence the reverse().toText() below. Any malformed name coming
+// back from the database is converted to DataSourceError.
+Name
+DatabaseClient::Finder::findPreviousName(const Name& name) const {
+ const string str(accessor_->findPreviousName(zone_id_,
+ name.reverse().toText()));
+ try {
+ return (Name(str));
+ }
+ /*
+ * To avoid having the same code many times, we just catch all the
+ * exceptions and handle them in a common code below
+ */
+ catch (const isc::dns::EmptyLabel&) {}
+ catch (const isc::dns::TooLongLabel&) {}
+ catch (const isc::dns::BadLabelType&) {}
+ catch (const isc::dns::BadEscape&) {}
+ catch (const isc::dns::TooLongName&) {}
+ catch (const isc::dns::IncompleteName&) {}
+ isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+}
+
+// Accessor for the zone apex name stored at construction time.
+Name
+DatabaseClient::Finder::getOrigin() const {
+ return (origin_);
+}
+
+// Class of the zone. Hard-coded to IN until the accessor can report
+// the actual class (see the TODO below).
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+ // TODO Implement
+ return isc::dns::RRClass::IN();
+}
+
+namespace {
+
+/*
+ * This needs, beside of converting all data from textual representation, group
+ * together rdata of the same RRsets. To do this, we hold one row of data ahead
+ * of iteration. When we get a request to provide data, we create it from this
+ * data and load a new one. If it is to be put to the same rrset, we add it.
+ * Otherwise we just return what we have and keep the row as the one ahead
+ * for next time.
+ */
+class DatabaseIterator : public ZoneIterator {
+public:
+ // Loads the first row immediately so the look-ahead invariant holds
+ // from the start.
+ DatabaseIterator(const DatabaseAccessor::IteratorContextPtr& context,
+ const RRClass& rrclass) :
+ context_(context),
+ class_(rrclass),
+ ready_(true)
+ {
+ // Prepare data for the next time
+ getData();
+ }
+
+ // Returns the next complete RRset, or a null pointer once the zone
+ // is exhausted. Throws isc::Unexpected when called again after that.
+ virtual isc::dns::ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(isc::Unexpected, "Iterating past the zone end");
+ }
+ if (!data_ready_) {
+ // At the end of zone
+ ready_ = false;
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_ITERATE_END);
+ return (ConstRRsetPtr());
+ }
+ string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+ Name name(name_str);
+ RRType rtype(rtype_str);
+ RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
+ // Accumulate consecutive rows with the same name and type into
+ // one RRset (rows of one RRset are guaranteed to be adjacent).
+ while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
+ if (ttl_ != ttl) {
+ // NOTE(review): TTLs are compared as strings here, so the
+ // comparison is lexicographic (e.g. "9" > "10"); confirm
+ // whether the "lowest TTL wins" rule can misfire.
+ if (ttl < ttl_) {
+ ttl_ = ttl;
+ rrset->setTTL(RRTTL(ttl));
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+ }
+ rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+ getData();
+ }
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
+ arg(rrset->getName()).arg(rrset->getType());
+ return (rrset);
+ }
+private:
+ // Load next row of data into the look-ahead members below.
+ void getData() {
+ string data[DatabaseAccessor::COLUMN_COUNT];
+ data_ready_ = context_->getNext(data);
+ name_ = data[DatabaseAccessor::NAME_COLUMN];
+ rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
+ ttl_ = data[DatabaseAccessor::TTL_COLUMN];
+ rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+ }
+
+ // The context
+ const DatabaseAccessor::IteratorContextPtr context_;
+ // Class of the zone
+ RRClass class_;
+ // Status: ready_ = iterator still usable; data_ready_ = look-ahead
+ // row is valid
+ bool ready_, data_ready_;
+ // Data of the next (look-ahead) row, still in textual form
+ string name_, rtype_, rdata_, ttl_;
+};
+
+}
+
+// Create a full-zone iterator for the given zone name. Throws
+// DataSourceError when the zone does not exist in this data source.
+ZoneIteratorPtr
+DatabaseClient::getIterator(const isc::dns::Name& name) const {
+ // Get the zone
+ std::pair<bool, int> zone(accessor_->getZone(name.toText()));
+ if (!zone.first) {
+ // No such zone, can't continue
+ isc_throw(DataSourceError, "Zone " + name.toText() +
+ " can not be iterated, because it doesn't exist "
+ "in this data source");
+ }
+ // Request the context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getAllRecords(zone.second));
+ // It must not return NULL, that's a bug of the implementation
+ if (context == DatabaseAccessor::IteratorContextPtr()) {
+ isc_throw(isc::Unexpected, "Iterator context null at " +
+ name.toText());
+ }
+ // Create the iterator and return it
+ // TODO: Once #1062 is merged with this, we need to get the
+ // actual zone class from the connection, as the DatabaseClient
+ // doesn't know it and the iterator needs it (so it wouldn't query
+ // it each time)
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
+ arg(name);
+ return (ZoneIteratorPtr(new DatabaseIterator(context, RRClass::IN())));
+}
+
+//
+// Zone updater using some database system as the underlying data source.
+//
+// The updater owns a cloned accessor with an open update transaction;
+// the destructor rolls the transaction back unless commit() succeeded.
+class DatabaseUpdater : public ZoneUpdater {
+public:
+ DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
+ const Name& zone_name, const RRClass& zone_class) :
+ committed_(false), accessor_(accessor), zone_id_(zone_id),
+ db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
+ zone_class_(zone_class),
+ finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
+ {
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ // Roll back any uncommitted changes; never throws (errors are
+ // only logged, as throwing from a destructor is not an option).
+ virtual ~DatabaseUpdater() {
+ if (!committed_) {
+ try {
+ accessor_->rollbackUpdateZone();
+ logger.info(DATASRC_DATABASE_UPDATER_ROLLBACK)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ } catch (const DataSourceError& e) {
+ // We generally expect that rollback always succeeds, and
+ // it should in fact succeed in a way we execute it. But
+ // as the public API allows rollbackUpdateZone() to fail and
+ // throw, we should expect it. Obviously we cannot re-throw
+ // it. The best we can do is to log it as a critical error.
+ logger.error(DATASRC_DATABASE_UPDATER_ROLLBACKFAIL)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_)
+ .arg(e.what());
+ }
+ }
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_DESTROYED)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+ }
+
+ virtual ZoneFinder& getFinder() { return (*finder_); }
+
+ virtual void addRRset(const RRset& rrset);
+ virtual void deleteRRset(const RRset& rrset);
+ virtual void commit();
+
+private:
+ bool committed_; // set once commit() completed; guards rollback
+ shared_ptr<DatabaseAccessor> accessor_;
+ const int zone_id_;
+ const string db_name_;
+ const string zone_name_;
+ const RRClass zone_class_;
+ boost::scoped_ptr<DatabaseClient::Finder> finder_;
+};
+
+// Add all RRs of the given RRset to the zone under update. Rejects
+// adds after commit, class mismatches, RRsets carrying an attached
+// RRSIG, and empty RRsets — each with DataSourceError.
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Add attempt after commit to zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "added to " << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being added for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
+ columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
+ columns[DatabaseAccessor::ADD_REV_NAME] =
+ rrset.getName().reverse().toText();
+ columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
+ columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+ // One addRecordToZone() call per rdata; shared columns stay as set.
+ for (; !it->isLast(); it->next()) {
+ if (rrset.getType() == RRType::RRSIG()) {
+ // XXX: the current interface (based on the current sqlite3
+ // data source schema) requires a separate "sigtype" column,
+ // even though it won't be used in a newer implementation.
+ // We should eventually clean up the schema design and simplify
+ // the interface, but until then we have to conform to the schema.
+ const generic::RRSIG& rrsig_rdata =
+ dynamic_cast<const generic::RRSIG&>(it->getCurrent());
+ columns[DatabaseAccessor::ADD_SIGTYPE] =
+ rrsig_rdata.typeCovered().toText();
+ }
+ columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+ accessor_->addRecordToZone(columns);
+ }
+}
+
+// Delete all RRs of the given RRset from the zone under update.
+// Performs the same validity checks as addRRset() (no post-commit
+// calls, matching class, no attached RRSIG, non-empty RRset).
+void
+DatabaseUpdater::deleteRRset(const RRset& rrset) {
+ if (committed_) {
+ isc_throw(DataSourceError, "Delete attempt after commit on zone: "
+ << zone_name_ << "/" << zone_class_);
+ }
+ if (rrset.getClass() != zone_class_) {
+ isc_throw(DataSourceError, "An RRset of a different class is being "
+ << "deleted from " << zone_name_ << "/" << zone_class_
+ << ": " << rrset.toText());
+ }
+ if (rrset.getRRsig()) {
+ isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
+ << zone_name_ << "/" << zone_class_ << ": "
+ << rrset.toText());
+ }
+
+ RdataIteratorPtr it = rrset.getRdataIterator();
+ if (it->isLast()) {
+ isc_throw(DataSourceError, "An empty RRset is being deleted for "
+ << rrset.getName() << "/" << zone_class_ << "/"
+ << rrset.getType());
+ }
+
+ string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
+ params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
+ params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+ // One deleteRecordInZone() call per rdata.
+ for (; !it->isLast(); it->next()) {
+ params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+ accessor_->deleteRecordInZone(params);
+ }
+}
+
+// Commit the update transaction. May be called at most once; a second
+// call throws DataSourceError.
+void
+DatabaseUpdater::commit() {
+ if (committed_) {
+ isc_throw(DataSourceError, "Duplicate commit attempt for "
+ << zone_name_ << "/" << zone_class_ << " on "
+ << db_name_);
+ }
+ accessor_->commitUpdateZone();
+ committed_ = true; // make sure the destructor won't trigger rollback
+
+ // We release the accessor immediately after commit is completed so that
+ // we don't hold the possible internal resource any longer.
+ accessor_.reset();
+
+ logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_COMMIT)
+ .arg(zone_name_).arg(zone_class_).arg(db_name_);
+}
+
+// The updater factory: clones the accessor so the update runs on its
+// own connection/transaction, and returns a null pointer when the
+// requested zone does not exist.
+ZoneUpdaterPtr
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+ shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
+ const std::pair<bool, int> zone(update_accessor->startUpdateZone(
+ name.toText(), replace));
+ if (!zone.first) {
+ return (ZoneUpdaterPtr());
+ }
+
+ return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
+ name, rrclass_)));
+}
+}
+}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..8295779
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,770 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <dns/rrclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrset.h>
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+#include <map>
+#include <set>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of lowlevel database with DNS data
+ *
+ * This class defines an interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, without much or any knowledge
+ * about DNS and should be possible to translate directly to queries.
+ *
+ * On the other hand, how the communication with database is done and in what
+ * schema (in case of relational/SQL database) is up to the concrete classes.
+ *
+ * This class is non-copyable, as copying connections to database makes little
+ * sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ * iterator might need its own copy. But a virtual clone() method might
+ * be better for that than copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ * database, having multiple instances of this class. If the database
+ * allows having multiple open queries at one connection, the connection
+ * class may share it.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+ /**
+ * Definitions of the fields as they are required to be filled in
+ * by IteratorContext::getNext()
+ *
+ * When implementing getNext(), the columns array should
+ * be filled with the values as described in this enumeration,
+ * in this order, i.e. TYPE_COLUMN should be the first element
+ * (index 0) of the array, TTL_COLUMN should be the second element
+ * (index 1), etc.
+ */
+ enum RecordColumns {
+ TYPE_COLUMN = 0, ///< The RRType of the record (A/NS/TXT etc.)
+ TTL_COLUMN = 1, ///< The TTL of the record (a textual form
+ ///< accepted by RRTTL; original comment was
+ ///< truncated here — TODO confirm wording)
+ SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers. In the current implementation,
+ ///< this field is ignored.
+ RDATA_COLUMN = 3, ///< Full text representation of the record's RDATA
+ NAME_COLUMN = 4, ///< The domain name of this RR
+ COLUMN_COUNT = 5 ///< The total number of columns, MUST be value of
+ ///< the largest other element in this enum plus 1.
+ };
+
+ /**
+ * Definitions of the fields to be passed to addRecordToZone().
+ *
+ * Each derived implementation of addRecordToZone() should expect
+ * the "columns" vector to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum AddRecordColumns {
+ ADD_NAME = 0, ///< The owner name of the record (a domain name)
+ ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+ ADD_TTL = 2, ///< The TTL of the record (in numeric form)
+ ADD_TYPE = 3, ///< The RRType of the record (A/NS/TXT etc.)
+ ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
+ ///< the RRSIG covers.
+ ADD_RDATA = 5, ///< Full text representation of the record's RDATA
+ ADD_COLUMN_COUNT = 6 ///< Number of columns
+ };
+
+ /**
+ * Definitions of the fields to be passed to deleteRecordInZone().
+ *
+ * Each derived implementation of deleteRecordInZone() should expect
+ * the "params" vector to be filled with the values as described in this
+ * enumeration, in this order.
+ */
+ enum DeleteRecordParams {
+ DEL_NAME = 0, ///< The owner name of the record (a domain name)
+ DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+ DEL_RDATA = 2, ///< Full text representation of the record's RDATA
+ DEL_PARAM_COUNT = 3 ///< Number of parameters
+ };
+
+ /**
+ * \brief Destructor
+ *
+ * It is empty, but needs a virtual one, since we will use the derived
+ * classes in polymorphic way.
+ */
+ virtual ~DatabaseAccessor() { }
+
+ /**
+ * \brief Retrieve a zone identifier
+ *
+ * This method looks up a zone for the given name in the database. It
+ * should match only exact zone name (eg. name is equal to the zone's
+ * apex), as the DatabaseClient will loop through the labels itself and
+ * find the most suitable zone.
+ *
+ * It is not specified if and what implementation of this method may throw,
+ * so code should expect anything.
+ *
+ * \param name The (fully qualified) domain name of the zone's apex to be
+ * looked up.
+ * \return The first part of the result indicates if a matching zone
+ * was found. In case it was, the second part is internal zone ID.
+ * This one will be passed to methods finding data in the zone.
+ * It is not required to keep them, in which case whatever might
+ * be returned - the ID is only passed back to the database as
+ * an opaque handle.
+ */
+ virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
+
+ /**
+ * \brief This holds the internal context of ZoneIterator for databases
+ *
+ * While the ZoneIterator implementation from DatabaseClient does all the
+ * translation from strings to DNS classes and validation, this class
+ * holds the pointer to where the database is at reading the data.
+ *
+ * It can either hold shared pointer to the connection which created it
+ * and have some kind of statement inside (in case single database
+ * connection can handle multiple concurrent SQL statements) or it can
+ * create a new connection (or, if it is more convenient, the connection
+ * itself can inherit both from DatabaseConnection and IteratorContext
+ * and just clone itself).
+ */
+ class IteratorContext : public boost::noncopyable {
+ public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor, so any descendant class is destroyed correctly.
+ */
+ virtual ~IteratorContext() { }
+
+ /**
+ * \brief Function to provide next resource record
+ *
+ * This function should provide data about the next resource record
+ * from the data that is searched. The data is not converted yet.
+ *
+ * Depending on how the iterator was constructed, there is a difference
+ * in behaviour; for a 'full zone iterator', created with
+ * getAllRecords(), all COLUMN_COUNT elements of the array are
+ * overwritten.
+ * For a 'name iterator', created with getRecords(), the column
+ * NAME_COLUMN is untouched, since what would be added here is by
+ * definition already known to the caller (it already passes it as
+ * an argument to getRecords()).
+ *
+ * Once this function returns false, any subsequent call to it should
+ * result in false. The implementation of a derived class must ensure
+ * it doesn't cause any disruption due to that such as a crash or
+ * exception.
+ *
+ * \note The order of RRs is not strictly set, but the RRs for single
+ * RRset must not be interleaved with any other RRs (eg. RRsets must be
+ * "together").
+ *
+ * \param columns The data will be returned through here. The order
+ * is specified by the RecordColumns enum, and the size must be
+ * COLUMN_COUNT
+ * \todo Do we consider databases where it is stored in binary blob
+ * format?
+ * \throw DataSourceError if there's database-related error. If the
+ * exception (or any other in case of derived class) is thrown,
+ * the iterator can't be safely used any more.
+ * \return true if a record was found, and the columns array was
+ * updated. false if there was no more data, in which case
+ * the columns array is untouched.
+ */
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
+ };
+
+ typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
+
+ /**
+ * \brief Creates an iterator context for a specific name.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * given name from the given zone.
+ *
+ * The implementation of the iterator that is returned may leave the
+ * NAME_COLUMN column of the array passed to getNext() untouched, as that
+ * data is already known (it is the same as the name argument here)
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param name The name to search for. This should be a FQDN.
+ * \param id The ID of the zone, returned from getZone().
+ * \param subdomains If set to true, match subdomains of name instead
+ * of name itself. It is used to find empty domains and match
+ * wildcards.
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getRecords(const std::string& name,
+ int id,
+ bool subdomains = false) const = 0;
+
+ /**
+ * \brief Creates an iterator context for the whole zone.
+ *
+ * Returns an IteratorContextPtr that contains all records of the
+ * zone with the given zone id.
+ *
+ * Each call to getNext() on the returned iterator should copy all
+ * column fields of the array that is passed, as defined in the
+ * RecordColumns enum.
+ *
+ * \exception any Since any implementation can be used, the caller should
+ * expect any exception to be thrown.
+ *
+ * \param id The ID of the zone, returned from getZone().
+ * \return Newly created iterator context. Must not be NULL.
+ */
+ virtual IteratorContextPtr getAllRecords(int id) const = 0;
+
+ /// Start a transaction for updating a zone.
+ ///
+ /// Each derived class version of this method starts a database
+ /// transaction to make updates to the given name of zone (whose class was
+ /// specified at the construction of the class).
+ ///
+ /// If \c replace is true, any existing records of the zone will be
+ /// deleted on successful completion of updates (after
+ /// \c commitUpdateZone()); if it's false, the existing records will be
+ /// intact unless explicitly deleted by \c deleteRecordInZone().
+ ///
+ /// A single \c DatabaseAccessor instance can perform at most one update
+ /// transaction; a duplicate call to this method before
+ /// \c commitUpdateZone() or \c rollbackUpdateZone() will result in
+ /// a \c DataSourceError exception. If multiple update attempts need
+ /// to be performed concurrently (and if the underlying database allows
+ /// such operation), separate \c DatabaseAccessor instance must be
+ /// created.
+ ///
+ /// \note The underlying database may not allow concurrent updates to
+ /// the same database instance even if different "connections" (or
+ /// something similar specific to the database implementation) are used
+ /// for different sets of updates. For example, it doesn't seem to be
+ /// possible for SQLite3 unless different databases are used. MySQL
+ /// allows concurrent updates to different tables of the same database,
+ /// but a specific operation may block others. As such, this interface
+ /// doesn't require derived classes to allow concurrent updates with
+ /// multiple \c DatabaseAccessor instances; however, the implementation
+ /// is encouraged to do the best for making it more likely to succeed
+ /// as long as the underlying database system allows concurrent updates.
+ ///
+ /// This method returns a pair of \c bool and \c int. Its first element
+ /// indicates whether the given name of zone is found. If it's false,
+ /// the transaction isn't considered to be started; a subsequent call to
+ /// this method with an existing zone name should succeed. Likewise,
+ /// if a call to this method results in an exception, the transaction
+ /// isn't considered to be started. Note also that if the zone is not
+ /// found this method doesn't try to create a new one in the database.
+ /// It must have been created by some other means beforehand.
+ ///
+ /// The second element is the internal zone ID used for subsequent
+ /// updates. Depending on implementation details of the actual derived
+ /// class method, it may be different from the one returned by
+ /// \c getZone(); for example, a specific implementation may use a
+ /// completely new zone ID when \c replace is true.
+ ///
+ /// \exception DataSourceError Duplicate call to this method, or some
+ /// internal database related error.
+ ///
+ /// \param zone_name A string representation of the zone name to be updated
+ /// \param replace Whether to replace the entire zone (see above)
+ ///
+ /// \return A pair of bool and int, indicating whether the specified zone
+ /// exists and (if so) the zone ID to be used for the update, respectively.
+ virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace) = 0;
+
+ /// Add a single record to the zone to be updated.
+ ///
+ /// This method provides a simple interface to insert a new record
+ /// (a database "row") to the zone in the update context started by
+ /// \c startUpdateZone(). The zone to which the record is to be added
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded to
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The row is defined as a vector of strings that has exactly
+ /// ADD_COLUMN_COUNT number of elements. See AddRecordColumns for
+ /// the semantics of each element.
+ ///
+ /// Derived class methods are not required to check whether the given
+ /// values in \c columns are valid in terms of the expected semantics;
+ /// in general, it's the caller's responsibility.
+ /// For example, TTLs would normally be expected to be a textual
+ /// representation of decimal numbers, but this interface doesn't require
+ /// the implementation to perform this level of validation. It may check
+ /// the values, however, and in that case if it detects an error it
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// Likewise, derived class methods are not required to detect any
+ /// duplicate record that is already in the zone.
+ ///
+ /// \note The underlying database schema may not have a trivial mapping
+ /// from this style of definition of rows to actual database records.
+ /// It's the implementation's responsibility to implement the mapping
+ /// in the actual derived method.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param columns An array of strings that defines a record to be added
+ /// to the zone.
+ virtual void addRecordToZone(
+ const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
+
+ /// Delete a single record from the zone to be updated.
+ ///
+ /// This method provides a simple interface to delete a record
+ /// (a database "row") from the zone in the update context started by
+ /// \c startUpdateZone(). The zone from which the record is to be deleted
+ /// is the one specified at the time of the \c startUpdateZone() call.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded to
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ ///
+ /// The record to be deleted is specified by a vector of strings that has
+ /// exactly DEL_PARAM_COUNT number of elements. See DeleteRecordParams
+ /// for the semantics of each element.
+ ///
+ /// \note In IXFR, TTL may also be specified, but we intentionally
+ /// ignore that in this interface, because it's not guaranteed
+ /// that all records have the same TTL (unlike the RRset
+ /// assumption) and there can even be multiple records for the
+ /// same name, type and rdata with different TTLs. If we only
+ /// delete one of them, subsequent lookup will still return a
+ /// positive answer, which would be confusing. It's a higher
+ /// layer's responsibility to check if there is at least one
+ /// record in the database that has the given TTL.
+ ///
+ /// Like \c addRecordToZone, derived class methods are not required to
+ /// validate the semantics of the given parameters or to check if there
+ /// is a record that matches the specified parameter; if there isn't
+ /// it simply ignores the result.
+ ///
+ /// \exception DataSourceError Invalid call without starting a transaction,
+ /// or other internal database error.
+ ///
+ /// \param params An array of strings that defines a record to be deleted
+ /// from the zone.
+ virtual void deleteRecordInZone(
+ const std::string (&params)[DEL_PARAM_COUNT]) = 0;
+
+ /// Commit updates to the zone.
+ ///
+ /// This method completes a transaction of making updates to the zone
+ /// in the context started by startUpdateZone.
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded to
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// If some internal database error happens, a \c DataSourceError
+ /// exception must be thrown. In that case the transaction is still
+ /// considered to be valid; the caller must explicitly rollback it
+ /// or (if it's confident that the error is temporary) try to commit it
+ /// again.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void commitUpdateZone() = 0;
+
+ /// Rollback updates to the zone made so far.
+ ///
+ /// This method rollbacks a transaction of making updates to the zone
+ /// in the context started by startUpdateZone. When it succeeds
+ /// (it normally should, but see below), the underlying database should
+ /// be reverted to the point before performing the corresponding
+ /// \c startUpdateZone().
+ ///
+ /// A successful call to \c startUpdateZone() must have preceded to
+ /// this call; otherwise a \c DataSourceError exception will be thrown.
+ /// Once this method successfully completes, the transaction isn't
+ /// considered to exist any more. So a new transaction can now be
+ /// started. On the other hand, a duplicate call to this method after
+ /// a successful completion of it is invalid and should result in
+ /// a \c DataSourceError exception.
+ ///
+ /// Normally this method should not fail. But it may not always be
+ /// possible to guarantee it depending on the characteristics of the
+ /// underlying database system. So this interface doesn't require the
+ /// actual implementation for the error free property. But if a specific
+ /// implementation of this method can fail, it is encouraged to document
+ /// when that can happen with its implication.
+ ///
+ /// \exception DataSourceError Call without a transaction, duplicate call
+ /// to the method or internal database error.
+ virtual void rollbackUpdateZone() = 0;
+
+ /// Clone the accessor with the same configuration.
+ ///
+ /// Each derived class implementation of this method will create a new
+ /// accessor of the same derived class with the same configuration
+ /// (such as the database server address) as that of the caller object
+ /// and return it.
+ ///
+ /// Note that other internal states won't be copied to the new accessor
+ /// even though the name of "clone" may indicate so. For example, even
+ /// if the calling accessor is in the middle of an update transaction,
+ /// the new accessor will not start a transaction to trace the same
+ /// updates.
+ ///
+ /// The intended use case of cloning is to create a separate context
+ /// where a specific set of database operations can be performed
+ /// independently from the original accessor. The updater will use it
+ /// so that multiple updaters can be created concurrently even if the
+ /// underlying database system doesn't allow running multiple transactions
+ /// in a single database connection.
+ ///
+ /// The underlying database system may not support the functionality
+ /// that would be needed to implement this method. For example, it
+ /// may not allow a single thread (or process) to have more than one
+ /// database connections. In such a case the derived class implementation
+ /// should throw a \c DataSourceError exception.
+ ///
+ /// \return A shared pointer to the cloned accessor.
+ virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
+
+ /**
+ * \brief Returns a string identifying this database backend
+ *
+ * The returned string is mainly intended to be used for
+ * debugging/logging purposes.
+ *
+ * Any implementation is free to choose the exact string content,
+ * but it is advisable to make it a name that is distinguishable
+ * from the others.
+ *
+ * \return the name of the database
+ */
+ virtual const std::string& getDBName() const = 0;
+
+ /**
+ * \brief It returns the previous name in DNSSEC order.
+ *
+ * This is used in DatabaseClient::findPreviousName and does more
+ * or less the real work, except for working on strings.
+ *
+ * \param rname The name to ask for previous of, in reversed form.
+ * We use the reversed form (see isc::dns::Name::reverse),
+ * because then the case insensitive order of string representation
+ * and the DNSSEC order correspond (eg. org.example.a is followed
+ * by org.example.a.b which is followed by org.example.b, etc).
+ * \param zone_id The zone to look through.
+ * \return The previous name.
+ * \note This function must return previous name even in case
+ * the queried rname does not exist in the zone.
+ * \note This method must skip under-the-zone-cut data (glue data).
+ * This might be implemented by looking for NSEC records (as glue
+ * data don't have them) in the zone or in some other way.
+ *
+ * \throw DataSourceError if there's a problem with the database.
+ * \throw NotImplemented if this database doesn't support DNSSEC
+ * or there's no previous name for the queried one (the NSECs
+ * might be missing or the queried name is less or equal the
+ * apex of the zone).
+ */
+ virtual std::string findPreviousName(int zone_id,
+ const std::string& rname) const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It calls multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to be just simple translation to SQL/other
+ * queries to database.
+ *
+ * While it is possible to subclass it for specific database in case
+ * of special needs, it is not expected to be needed. This should just
+ * work as it is with whatever DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+ /**
+ * \brief Constructor
+ *
+ * It initializes the client with a database via the given accessor.
+ *
+ * \exception isc::InvalidParameter if accessor is NULL. It might throw
+ * standard allocation exception as well, but doesn't throw anything else.
+ *
+ * \param rrclass The RR class of the zones that this client will handle.
+ * \param accessor The accessor to the database to use to get data.
+ * As the parameter suggests, the client takes ownership of the accessor
+ * and will delete it when itself deleted.
+ */
+ DatabaseClient(isc::dns::RRClass rrclass,
+ boost::shared_ptr<DatabaseAccessor> accessor);
+
+ /**
+ * \brief Corresponding ZoneFinder implementation
+ *
+ * The zone finder implementation for database data sources. Similarly
+ * to the DatabaseClient, it translates the queries to methods of the
+ * database.
+ *
+ * Application should not come directly in contact with this class
+ * (it should handle it through a generic ZoneFinder pointer), therefore
+ * it could be completely hidden in the .cc file. But it is provided
+ * to allow testing and for rare cases when a database needs slightly
+ * different handling, so it can be subclassed.
+ *
+ * Methods directly corresponds to the ones in ZoneFinder.
+ */
+ class Finder : public ZoneFinder {
+ public:
+ /**
+ * \brief Constructor
+ *
+ * \param database The database (shared with DatabaseClient) to
+ * be used for queries (the one asked for ID before).
+ * \param zone_id The zone ID which was returned from
+ * DatabaseAccessor::getZone and which will be passed to further
+ * calls to the database.
+ * \param origin The name of the origin of this zone. It could query
+ * it from database, but as the DatabaseClient just searched for
+ * the zone using the name, it should have it.
+ */
+ Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+ const isc::dns::Name& origin);
+ // The following three methods are just implementations of inherited
+ // ZoneFinder's pure virtual methods.
+ virtual isc::dns::Name getOrigin() const;
+ virtual isc::dns::RRClass getClass() const;
+
+ /**
+ * \brief Find an RRset in the datasource
+ *
+ * Searches the datasource for an RRset of the given name and
+ * type. If there is a CNAME at the given name, the CNAME rrset
+ * is returned.
+ * (this implementation is not complete, and currently only
+ * does full matches, CNAMES, and the signatures for matches and
+ * CNAMEs)
+ * \note target was used in the original design to handle ANY
+ * queries. This is not implemented yet, and may use
+ * target again for that, but it might also use something
+ * different. It is left in for compatibility at the moment.
+ * \note options are ignored at this moment
+ *
+ * \note Maybe counter intuitively, this method is not a const member
+ * function. This is intentional; some of the underlying implementations
+ * are expected to use a database backend, and would internally contain
+ * some abstraction of "database connection". In the most strict sense
+ * any (even read only) operation might change the internal state of
+ * such a connection, and in that sense the operation cannot be considered
+ * "const". In order to avoid giving a false sense of safety to the
+ * caller, we indicate a call to this method may have a surprising
+ * side effect. That said, this view may be too strict and it may
+ * make sense to say the internal database connection doesn't affect
+ * external behavior in terms of the interface of this method. As
+ * we gain more experiences with various kinds of backends we may
+ * revisit the constness.
+ *
+ * \exception DataSourceError when there is a problem reading
+ * the data from the database backend.
+ * This can be a connection, code, or
+ * data (parse) error.
+ *
+ * \param name The name to find
+ * \param type The RRType to find
+ * \param target Unused at this moment
+ * \param options Options about how to search.
+ * See ZoneFinder::FindOptions.
+ */
+ virtual FindResult find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ isc::dns::RRsetList* target = NULL,
+ const FindOptions options = FIND_DEFAULT);
+
+ /**
+ * \brief Implementation of ZoneFinder::findPreviousName method.
+ */
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const;
+
+ /**
+ * \brief The zone ID
+ *
+ * This function provides the stored zone ID as passed to the
+ * constructor. This is meant for testing purposes and normal
+ * applications shouldn't need it.
+ */
+ int zone_id() const { return (zone_id_); }
+
+ /**
+ * \brief The database accessor.
+ *
+ * This function provides the database accessor stored inside as
+ * passed to the constructor. This is meant for testing purposes and
+ * normal applications shouldn't need it.
+ */
+ const DatabaseAccessor& getAccessor() const {
+ return (*accessor_);
+ }
+ private:
+ boost::shared_ptr<DatabaseAccessor> accessor_;
+ const int zone_id_;
+ const isc::dns::Name origin_;
+ //
+ /// \brief Shortcut name for the result of getRRsets
+ typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
+ FoundRRsets;
+ /// \brief Just shortcut for set of types
+ typedef std::set<dns::RRType> WantedTypes;
+ /**
+ * \brief Searches database for RRsets of one domain.
+ *
+ * This method scans RRs of single domain specified by name and
+ * extracts any RRsets found and requested by parameters.
+ *
+ * It is used internally by find(), because it is called multiple
+ * times (usually with different domains).
+ *
+ * \param name Which domain name should be scanned.
+ * \param types List of types the caller is interested in.
+ * \param check_ns If this is set to true, it checks nothing lives
+ * together with NS record (with few little exceptions, like RRSIG
+ * or NSEC). This check is meant for non-apex NS records.
+ * \param construct_name If this is NULL, the resulting RRsets have
+ * their name set to name. If it is not NULL, it overrides the name
+ * and uses this one (this can be used for wildcard synthesized
+ * records).
+ * \return A pair, where the first element indicates if the domain
+ * contains any RRs at all (not only the requested, it may happen
+ * this is set to true, but the second part is empty). The second
+ * part is map from RRtypes to RRsets of the corresponding types.
+ * If the RRset is not present in DB, the RRtype is not there at
+ * all (so you'll not find NULL pointer in the result).
+ * \throw DataSourceError If there's a low-level error with the
+ * database or the database contains bad data.
+ */
+ FoundRRsets getRRsets(const std::string& name,
+ const WantedTypes& types, bool check_ns,
+ const std::string* construct_name = NULL);
+ /**
+ * \brief Checks if something lives below this domain.
+ *
+ * This looks if there's any subdomain of the given name. It can be
+ * used to test if domain is empty non-terminal.
+ *
+ * \param name The domain to check.
+ */
+ bool hasSubdomains(const std::string& name);
+
+ /**
+ * \brief Get the NSEC covering a name.
+ *
+ * This one calls findPreviousName on the given name and extracts an NSEC
+ * record on the result. It handles various error cases. The method exists
+ * to share code present at more than one location.
+ */
+ dns::RRsetPtr findNSECCover(const dns::Name& name);
+
+ /**
+ * \brief Convenience type shortcut.
+ *
+ * To find stuff in the result of getRRsets.
+ */
+ typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
+ FoundIterator;
+ };
+
+ /**
+ * \brief Find a zone in the database
+ *
+ * This queries database's getZone to find the best matching zone.
+ * It will propagate whatever exceptions are thrown from that method
+ * (which is not restricted in any way).
+ *
+ * \param name Name of the zone or data contained there.
+ * \return FindResult containing the code and an instance of Finder, if
+ * anything is found. However, application should not rely on the
+ * ZoneFinder being instance of Finder (possible subclass of this class
+ * may return something else and it may change in future versions), it
+ * should use it as a ZoneFinder only.
+ */
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+ /**
+ * \brief Get the zone iterator
+ *
+ * The iterator allows going through the whole zone content. If the
+ * underlying DatabaseConnection is implemented correctly, it should
+ * be possible to have multiple ZoneIterators at once and query data
+ * at the same time.
+ *
+ * \exception DataSourceError if the zone doesn't exist.
+ * \exception isc::NotImplemented if the underlying DatabaseConnection
+ * doesn't implement iteration. But in case it is not implemented
+ * and the zone doesn't exist, DataSourceError is thrown.
+ * \exception Anything else the underlying DatabaseConnection might
+ * want to throw.
+ * \param name The origin of the zone to iterate.
+ * \return Shared pointer to the iterator (it will never be NULL)
+ */
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// This implementation internally clones the accessor from the one
+ /// used in the client and starts a separate transaction using the cloned
+ /// accessor. The returned updater will be able to work separately from
+ /// the original client.
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
+
+private:
+ /// \brief The RR class that this client handles.
+ const isc::dns::RRClass rrclass_;
+
+ /// \brief The accessor to our database.
+ const boost::shared_ptr<DatabaseAccessor> accessor_;
+};
+
+}
+}
+
+#endif // __DATABASE_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index c692364..04ad610 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -17,63 +17,149 @@ $NAMESPACE isc::datasrc
# \brief Messages for the data source library
% DATASRC_CACHE_CREATE creating the hotspot cache
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
% DATASRC_CACHE_DESTROY destroying the hotspot cache
Debug information. The hotspot cache is being destroyed.
-% DATASRC_CACHE_DISABLE disabling the cache
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
+% DATASRC_CACHE_DISABLE disabling the hotspot cache
+A debug message issued when the hotspot cache is disabled.
-% DATASRC_CACHE_ENABLE enabling the cache
-The hotspot cache is enabled from now on.
+% DATASRC_CACHE_ENABLE enabling the hotspot cache
+A debug message issued when the hotspot cache is enabled.
-% DATASRC_CACHE_EXPIRED the item '%1' is expired
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+% DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired
+A debug message issued when a hotspot cache lookup located the item but it
+had expired. The item was removed and the program proceeded as if the item
+had not been found.
% DATASRC_CACHE_FOUND the item '%1' was found
-Debug information. An item was successfully looked up in the hotspot cache.
+Debug information. An item was successfully located in the hotspot cache.
-% DATASRC_CACHE_FULL cache is full, dropping oldest
+% DATASRC_CACHE_FULL hotspot cache is full, dropping oldest
Debug information. After inserting an item into the hotspot cache, the
maximum number of items was exceeded, so the least recently used item will
be dropped. This should be directly followed by CACHE_REMOVE.
-% DATASRC_CACHE_INSERT inserting item '%1' into the cache
-Debug information. It means a new item is being inserted into the hotspot
+% DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache
+A debug message indicating that a new item is being inserted into the hotspot
cache.
-% DATASRC_CACHE_NOT_FOUND the item '%1' was not found
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
+% DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
-% DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing
+% DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing
Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
-% DATASRC_CACHE_REMOVE removing '%1' from the cache
+% DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache
Debug information. An item is being removed from the hotspot cache.
-% DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items
+% DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items
The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+% DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+
+% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+
+% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+
+% DATASRC_DATABASE_ITERATE iterating zone %1
+The program is reading the whole zone, eg. not searching for data, but going
+through each of the RRsets there.
+
+% DATASRC_DATABASE_ITERATE_END iterating zone finished
+While iterating through the zone, the program reached end of the data.
+
+% DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2
+While iterating through the zone, the program extracted next RRset from it.
+The name and RRtype of the RRset is indicated in the message.
+
+% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+
+% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+The database was queried to provide glue data and it didn't find direct match.
+It could create it from given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+
+% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
+The given wildcard exists implicitly in the domainspace, as empty nonterminal
+(eg. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
+
% DATASRC_DO_QUERY handling query for '%1/%2'
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
% DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'
Debug information. An RRset is being added to the in-memory data source.
% DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
% DATASRC_MEM_ADD_ZONE adding zone '%1/%2'
Debug information. A zone is being added into the in-memory data source.
@@ -114,9 +200,9 @@ stop the search.
Debug information. A DNAME was found instead of the requested information.
% DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
% DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty
Debug information. The requested domain exists in the tree of domains, but
@@ -142,7 +228,7 @@ in-memory data source.
% DATASRC_MEM_LOAD loading zone '%1' from file '%2'
Debug information. The content of master file is being loaded into the memory.
-% DATASRC_MEM_NOTFOUND requested domain '%1' not found
+% DATASRC_MEM_NOT_FOUND requested domain '%1' not found
Debug information. The requested domain does not exist.
% DATASRC_MEM_NS_ENCOUNTERED encountered a NS
@@ -201,11 +287,11 @@ behave and BIND 9 refuses that as well. Please describe your intention using
different tools.
% DATASRC_META_ADD adding a data source into meta data source
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
% DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
classes do not match.
% DATASRC_META_REMOVE removing data source from meta data source
@@ -234,11 +320,11 @@ specific error already.
The domain lives in another zone. But it is not possible to generate referral
information for it.
-% DATASRC_QUERY_CACHED data for %1/%2 found in cache
+% DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache
Debug information. The requested data were found in the hotspot cache, so
no query is sent to the real data source.
-% DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'
+% DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'
Debug information. While processing a query, lookup to the hotspot cache
is being made.
@@ -251,10 +337,9 @@ Debug information. The software is trying to identify delegation points on the
way down to the given domain.
% DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
% DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty
During an attempt to synthesize CNAME from this DNAME it was discovered the
@@ -262,11 +347,11 @@ DNAME is empty (it has no records). This indicates problem with supplied data.
% DATASRC_QUERY_FAIL query failed
Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
% DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
% DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'
Debug information. While processing a query, a MX record was met. It
@@ -291,14 +376,14 @@ operation code.
Debug information. The last DO_QUERY is an auth query.
% DATASRC_QUERY_IS_GLUE glue query (%1/%2)
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
% DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
glue.
% DATASRC_QUERY_IS_REF query for referral (%1/%2)
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
% DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)
Debug information. The last DO_QUERY is a simple query.
@@ -322,11 +407,11 @@ The underlying data source failed to answer the no-glue query. 1 means some
error, 2 is not implemented. The data source should have logged the specific
error already.
-% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)
+% DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)
Debug information. The hotspot cache is ignored for authoritative ANY queries
for consistency reasons.
-% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)
+% DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)
Debug information. The hotspot cache is ignored for ANY queries for consistency
reasons.
@@ -345,7 +430,7 @@ domain. Maybe someone sent a query to the wrong server for some reason.
% DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class
Debug information. A sure query is being processed now.
-% DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'
+% DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'
The user wants DNSSEC and we discovered the entity doesn't exist (either
domain or the record). But there was an error getting NSEC/NSEC3 record
to prove the nonexistence.
@@ -365,9 +450,9 @@ error, 2 is not implemented. The data source should have logged the specific
error already.
% DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
% DATASRC_QUERY_TASK_FAIL task failed with %1
The query subtask failed. The reason should have been reported by the subtask
@@ -391,7 +476,7 @@ domain is being looked for now.
During an attempt to cover the domain by a wildcard an error happened. The
exact kind was hopefully already reported.
-% DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)
+% DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)
While processing a wildcard, it wasn't possible to prove nonexistence of the
given domain or record. The code is 1 for error and 2 for not implemented.
@@ -401,17 +486,27 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
+% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
+The database file is no longer needed and is being closed.
+
% DATASRC_SQLITE_CREATE SQLite data source created
Debug information. An instance of SQLite data source is being created.
% DATASRC_SQLITE_DESTROY SQLite data source destroyed
Debug information. An instance of SQLite data source is being destroyed.
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+The object around a database connection is being destroyed.
+
% DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
Debug information. The SQLite data source is trying to identify which zone
should hold this domain.
-% DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it
+% DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'
Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
no such zone in our data.
@@ -459,25 +554,35 @@ source.
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+A wrapper object to hold database connection is being initialized.
+
% DATASRC_SQLITE_OPEN opening SQLite database '%1'
Debug information. The SQLite data source is loading an SQLite database in
the provided file.
% DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message. The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
% DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
% DATASRC_SQLITE_SETUP setting up SQLite database
The database for SQLite data source was found empty. It is assumed this is the
first run and it is being initialized with current schema. It'll still contain
no data, but it will be ready for use.
-% DATASRC_STATIC_BAD_CLASS static data source can handle CH only
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+% DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
% DATASRC_STATIC_CREATE creating the static datasource
Debug information. The static data source (the one holding stuff like
@@ -491,3 +596,37 @@ data source.
This indicates a programming error. An internal task of unknown type was
generated.
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information. A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information. A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates rolled back for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also indicate a bug in the application that forgot to
+commit the changes. The intermediate changes made through the updater won't
+be applied to the underlying database. The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails. The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation. In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid. The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information. A set of updates to a zone has been successfully
+committed to the corresponding database backend. The zone name,
+its class and the database name are printed.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
new file mode 100644
index 0000000..eddd4f4
--- /dev/null
+++ b/src/lib/datasrc/factory.cc
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "factory.h"
+
+#include "data_source.h"
+#include "database.h"
+#include "sqlite3_accessor.h"
+#include "memory_datasrc.h"
+
+#include <datasrc/logger.h>
+
+#include <dlfcn.h>
+
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace isc {
+namespace datasrc {
+
+LibraryContainer::LibraryContainer(const std::string& name) {
+ ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_LOCAL);
+ if (ds_lib_ == NULL) {
+ isc_throw(DataSourceLibraryError, dlerror());
+ }
+}
+
+LibraryContainer::~LibraryContainer() {
+ dlclose(ds_lib_);
+}
+
+void*
+LibraryContainer::getSym(const char* name) {
+ // Since dlsym can return NULL on success, we check for errors by
+ // first clearing any existing errors with dlerror(), then calling dlsym,
+ // and finally checking for errors with dlerror()
+ dlerror();
+
+ void *sym = dlsym(ds_lib_, name);
+
+ const char* dlsym_error = dlerror();
+ if (dlsym_error != NULL) {
+ isc_throw(DataSourceLibrarySymbolError, dlsym_error);
+ }
+
+ return (sym);
+}
+
+DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
+ ConstElementPtr config)
+: ds_lib_(type + "_ds.so")
+{
+ // We are casting from a data to a function pointer here
+ // Some compilers (rightfully) complain about that, but
+ // c-style casts are accepted the most here. If we run
+ // into any that also don't like this, we might need to
+ // use some form of union cast or memory copy to get
+ // from the void* to the function pointer.
+ ds_creator* ds_create = (ds_creator*)ds_lib_.getSym("createInstance");
+ destructor_ = (ds_destructor*)ds_lib_.getSym("destroyInstance");
+
+ instance_ = ds_create(config);
+}
+
+DataSourceClientContainer::~DataSourceClientContainer() {
+ destructor_(instance_);
+}
+
+} // end namespace datasrc
+} // end namespace isc
+
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
new file mode 100644
index 0000000..8db9ec9
--- /dev/null
+++ b/src/lib/datasrc/factory.h
@@ -0,0 +1,182 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_FACTORY_H
+#define __DATA_SOURCE_FACTORY_H 1
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <exceptions/exceptions.h>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace datasrc {
+
+
+/// \brief Raised if there is an error loading the datasource implementation
+/// library
+class DataSourceLibraryError : public DataSourceError {
+public:
+ DataSourceLibraryError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if there is an error reading a symbol from the datasource
+/// implementation library
+class DataSourceLibrarySymbolError : public DataSourceError {
+public:
+ DataSourceLibrarySymbolError(const char* file, size_t line,
+ const char* what) :
+ DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if the given config contains bad data
+///
+/// Depending on the datasource type, the configuration may differ (for
+/// instance, the sqlite3 datasource needs a database file).
+class DataSourceConfigError : public DataSourceError {
+public:
+ DataSourceConfigError(const char* file, size_t line, const char* what) :
+ DataSourceError(file, line, what) {}
+ // This exception is created in the dynamic modules. Apparently
+ // sunstudio can't handle it if we then automatically derive the
+    // destructor, so we provide it explicitly
+ ~DataSourceConfigError() throw() {}
+};
+
+typedef DataSourceClient* ds_creator(isc::data::ConstElementPtr config);
+typedef void ds_destructor(DataSourceClient* instance);
+
+/// \brief Container class for dynamically loaded libraries
+///
+/// This class is used to dlopen() a library, provides access to dlsym(),
+/// and cleans up the dlopened library when the instance of this class is
+/// destroyed.
+///
+/// Its main function is to provide RAII-style access to dlopen'ed libraries.
+///
+/// \note Currently it is Datasource-backend specific. If we have need for this
+/// in other places than for dynamically loading datasources, then, apart
+/// from moving it to another location, we also need to make the
+/// exceptions raised more general.
+class LibraryContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \param name The name of the library (.so) file. This file must be in
+ /// the library path.
+ ///
+ /// \exception DataSourceLibraryError If the library cannot be found or
+ /// cannot be loaded.
+ LibraryContainer(const std::string& name);
+
+ /// \brief Destructor
+ ///
+ /// Cleans up the library by calling dlclose()
+ ~LibraryContainer();
+
+ /// \brief Retrieve a symbol
+ ///
+ /// This retrieves a symbol from the loaded library.
+ ///
+ /// \exception DataSourceLibrarySymbolError if the symbol cannot be found,
+ /// or if another error (as reported by dlerror() occurs.
+ ///
+ /// \param name The name of the symbol to retrieve
+ /// \return A pointer to the symbol. This may be NULL, and if so, indicates
+ /// the symbol does indeed exist, but has the value NULL itself.
+ /// If the symbol does not exist, a DataSourceLibrarySymbolError is
+ /// raised.
+ ///
+ /// \note The argument is a const char* (and not a std::string like the
+ /// argument in the constructor). This argument is always a fixed
+ /// string in the code, while the other can be read from
+ /// configuration, and needs modification
+ void* getSym(const char* name);
+private:
+ /// Pointer to the dynamically loaded library structure
+ void *ds_lib_;
+};
+
+
+/// \brief Container for a specific instance of a dynamically loaded
+/// DataSourceClient implementation
+///
+/// Given a datasource type and a type-specific set of configuration data,
+/// the corresponding dynamic library is loaded (if it hadn't been already),
+/// and an instance is created. This instance is stored within this structure,
+/// and can be accessed through getInstance(). Upon destruction of this
+/// container, the stored instance of the DataSourceClient is deleted with
+/// the destructor function provided by the loaded library.
+///
+/// The 'type' is actually the name of the library, minus the '_ds.so' postfix
+/// Datasource implementation libraries therefore have a fixed name, both for
+/// easy recognition and to reduce potential mistakes.
+/// For example, the sqlite3 implementation has the type 'sqlite3', and the
+/// derived filename 'sqlite3_ds.so'
+///
+/// There are of course some demands to an implementation, not all of which
+/// can be verified compile-time. It must provide a creator and destructor
+/// functions. The creator function must return an instance of a subclass of
+/// DataSourceClient. The prototypes of these functions are as follows:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+///
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+class DataSourceClientContainer : boost::noncopyable {
+public:
+ /// \brief Constructor
+ ///
+ /// \exception DataSourceLibraryError if there is an error loading the
+ /// backend library
+ /// \exception DataSourceLibrarySymbolError if the library does not have
+ /// the needed symbols, or if there is an error reading them
+ /// \exception DataSourceConfigError if the given config is not correct
+ /// for the given type
+ ///
+ /// \param type The type of the datasource client. Based on the value of
+ /// type, a specific backend library is used, by appending the
+ /// string '_ds.so' to the given type, and loading that as the
+ /// implementation library
+ /// \param config Type-specific configuration data, see the documentation
+ /// of the datasource backend type for information on what
+ /// configuration data to pass.
+ DataSourceClientContainer(const std::string& type,
+ isc::data::ConstElementPtr config);
+
+ /// \brief Destructor
+ ~DataSourceClientContainer();
+
+ /// \brief Accessor to the instance
+ ///
+ /// \return Reference to the DataSourceClient instance contained in this
+ /// container.
+ DataSourceClient& getInstance() { return *instance_; }
+
+private:
+ DataSourceClient* instance_;
+ ds_destructor* destructor_;
+ LibraryContainer ds_lib_;
+};
+
+} // end namespace datasrc
+} // end namespace isc
+#endif // __DATA_SOURCE_FACTORY_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
new file mode 100644
index 0000000..0102fcb
--- /dev/null
+++ b/src/lib/datasrc/iterator.h
@@ -0,0 +1,61 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (a descendant of) ZoneIterator from
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There is no way to restart the iteration from the beginning or to go back.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor. It is empty, but ensures the right destructor from
+ * descendant is called.
+ */
+ virtual ~ ZoneIterator() { }
+
+ /**
+ * \brief Get next RRset from the zone.
+ *
+ * This returns the next RRset in the zone as a shared pointer. The
+ * shared pointer is used to allow both accessing in-memory data and
+ * automatic memory management.
+ *
+ * Any special order is not guaranteed.
+ *
+ * While this can potentially throw anything (including standard allocation
+ * errors), it should be rare.
+ *
+ * \return Pointer to the next RRset or NULL pointer when the iteration
+ * gets to the end of the zone.
+ */
+ virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+};
+
+}
+}
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 3c57d1b..4c9e53f 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -16,6 +16,9 @@
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
#include <dns/name.h>
#include <dns/rrclass.h>
@@ -25,17 +28,44 @@
#include <datasrc/memory_datasrc.h>
#include <datasrc/rbtree.h>
#include <datasrc/logger.h>
+#include <datasrc/iterator.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+
+#include <cc/data.h>
using namespace std;
using namespace isc::dns;
+using namespace isc::data;
namespace isc {
namespace datasrc {
-// Private data and hidden methods of MemoryZone
-struct MemoryZone::MemoryZoneImpl {
+namespace {
+// Some type aliases
+/*
+ * Each domain consists of some RRsets. They will be looked up by the
+ * RRType.
+ *
+ * The use of map is questionable with regard to performance - there'll
+ * be usually only few RRsets in the domain, so the log n benefit isn't
+ * much and a vector/array might be faster due to its simplicity and
+ * continuous memory location. But this is unlikely to be a performance
+ * critical place and map has better interface for the lookups, so we use
+ * that.
+ */
+typedef map<RRType, ConstRRsetPtr> Domain;
+typedef Domain::value_type DomainPair;
+typedef boost::shared_ptr<Domain> DomainPtr;
+// The tree stores domains
+typedef RBTree<Domain> DomainTree;
+typedef RBNode<Domain> DomainNode;
+}
+
+// Private data and hidden methods of InMemoryZoneFinder
+struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
// Constructor
- MemoryZoneImpl(const RRClass& zone_class, const Name& origin) :
+ InMemoryZoneFinderImpl(const RRClass& zone_class, const Name& origin) :
zone_class_(zone_class), origin_(origin), origin_data_(NULL),
domains_(true)
{
@@ -44,25 +74,6 @@ struct MemoryZone::MemoryZoneImpl {
DomainPtr origin_domain(new Domain);
origin_data_->setData(origin_domain);
}
-
- // Some type aliases
- /*
- * Each domain consists of some RRsets. They will be looked up by the
- * RRType.
- *
- * The use of map is questionable with regard to performance - there'll
- * be usually only few RRsets in the domain, so the log n benefit isn't
- * much and a vector/array might be faster due to its simplicity and
- * continuous memory location. But this is unlikely to be a performance
- * critical place and map has better interface for the lookups, so we use
- * that.
- */
- typedef map<RRType, ConstRRsetPtr> Domain;
- typedef Domain::value_type DomainPair;
- typedef boost::shared_ptr<Domain> DomainPtr;
- // The tree stores domains
- typedef RBTree<Domain> DomainTree;
- typedef RBNode<Domain> DomainNode;
static const DomainNode::Flags DOMAINFLAG_WILD = DomainNode::FLAG_USER1;
// Information about the zone
@@ -129,7 +140,7 @@ struct MemoryZone::MemoryZoneImpl {
// Ensure CNAME and other type of RR don't coexist for the same
// owner name.
if (rrset->getType() == RRType::CNAME()) {
- // XXX: this check will become incorrect when we support DNSSEC
+ // TODO: this check will become incorrect when we support DNSSEC
// (depending on how we support DNSSEC). We should revisit it
// at that point.
if (!domain->empty()) {
@@ -223,12 +234,15 @@ struct MemoryZone::MemoryZoneImpl {
* Implementation of longer methods. We put them here, because the
* access is without the impl_-> and it will get inlined anyway.
*/
- // Implementation of MemoryZone::add
+ // Implementation of InMemoryZoneFinder::add
result::Result add(const ConstRRsetPtr& rrset, DomainTree* domains) {
+ // Sanitize input. This will cause an exception to be thrown
+ // if the input RRset is empty.
+ addValidation(rrset);
+
+ // OK, can add the RRset.
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_ADD_RRSET).
arg(rrset->getName()).arg(rrset->getType()).arg(origin_);
- // Sanitize input
- addValidation(rrset);
// Add wildcards possibly contained in the owner name to the domain
// tree.
@@ -406,7 +420,7 @@ struct MemoryZone::MemoryZoneImpl {
}
}
- // Implementation of MemoryZone::find
+ // Implementation of InMemoryZoneFinder::find
FindResult find(const Name& name, RRType type,
RRsetList* target, const FindOptions options) const
{
@@ -520,7 +534,7 @@ struct MemoryZone::MemoryZoneImpl {
// fall through
case DomainTree::NOTFOUND:
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOTFOUND).
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOT_FOUND).
arg(name);
return (FindResult(NXDOMAIN, ConstRRsetPtr()));
case DomainTree::EXACTMATCH: // This one is OK, handle it
@@ -590,50 +604,50 @@ struct MemoryZone::MemoryZoneImpl {
}
};
-MemoryZone::MemoryZone(const RRClass& zone_class, const Name& origin) :
- impl_(new MemoryZoneImpl(zone_class, origin))
+InMemoryZoneFinder::InMemoryZoneFinder(const RRClass& zone_class, const Name& origin) :
+ impl_(new InMemoryZoneFinderImpl(zone_class, origin))
{
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_CREATE).arg(origin).
arg(zone_class);
}
-MemoryZone::~MemoryZone() {
+InMemoryZoneFinder::~InMemoryZoneFinder() {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_DESTROY).arg(getOrigin()).
arg(getClass());
delete impl_;
}
-const Name&
-MemoryZone::getOrigin() const {
+Name
+InMemoryZoneFinder::getOrigin() const {
return (impl_->origin_);
}
-const RRClass&
-MemoryZone::getClass() const {
+RRClass
+InMemoryZoneFinder::getClass() const {
return (impl_->zone_class_);
}
-Zone::FindResult
-MemoryZone::find(const Name& name, const RRType& type,
- RRsetList* target, const FindOptions options) const
+ZoneFinder::FindResult
+InMemoryZoneFinder::find(const Name& name, const RRType& type,
+ RRsetList* target, const FindOptions options)
{
return (impl_->find(name, type, target, options));
}
result::Result
-MemoryZone::add(const ConstRRsetPtr& rrset) {
+InMemoryZoneFinder::add(const ConstRRsetPtr& rrset) {
return (impl_->add(rrset, &impl_->domains_));
}
void
-MemoryZone::load(const string& filename) {
+InMemoryZoneFinder::load(const string& filename) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
arg(filename);
// Load it into a temporary tree
- MemoryZoneImpl::DomainTree tmp;
+ DomainTree tmp;
masterLoad(filename.c_str(), getOrigin(), getClass(),
- boost::bind(&MemoryZoneImpl::addFromLoad, impl_, _1, &tmp));
+ boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_, _1, &tmp));
// If it went well, put it inside
impl_->file_name_ = filename;
tmp.swap(impl_->domains_);
@@ -641,64 +655,294 @@ MemoryZone::load(const string& filename) {
}
void
-MemoryZone::swap(MemoryZone& zone) {
+InMemoryZoneFinder::swap(InMemoryZoneFinder& zone_finder) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_SWAP).arg(getOrigin()).
- arg(zone.getOrigin());
- std::swap(impl_, zone.impl_);
+ arg(zone_finder.getOrigin());
+ std::swap(impl_, zone_finder.impl_);
}
const string
-MemoryZone::getFileName() const {
+InMemoryZoneFinder::getFileName() const {
return (impl_->file_name_);
}
-/// Implementation details for \c MemoryDataSrc hidden from the public
+isc::dns::Name
+InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const {
+ isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC "
+ "yet, can't find previous name");
+}
+
+/// Implementation details for \c InMemoryClient hidden from the public
/// interface.
///
-/// For now, \c MemoryDataSrc only contains a \c ZoneTable object, which
-/// consists of (pointers to) \c MemoryZone objects, we may add more
+/// For now, \c InMemoryClient only contains a \c ZoneTable object, which
+/// consists of (pointers to) \c InMemoryZoneFinder objects, we may add more
/// member variables later for new features.
-class MemoryDataSrc::MemoryDataSrcImpl {
+class InMemoryClient::InMemoryClientImpl {
public:
- MemoryDataSrcImpl() : zone_count(0) {}
+ InMemoryClientImpl() : zone_count(0) {}
unsigned int zone_count;
ZoneTable zone_table;
};
-MemoryDataSrc::MemoryDataSrc() : impl_(new MemoryDataSrcImpl)
+InMemoryClient::InMemoryClient() : impl_(new InMemoryClientImpl)
{}
-MemoryDataSrc::~MemoryDataSrc() {
+InMemoryClient::~InMemoryClient() {
delete impl_;
}
unsigned int
-MemoryDataSrc::getZoneCount() const {
+InMemoryClient::getZoneCount() const {
return (impl_->zone_count);
}
result::Result
-MemoryDataSrc::addZone(ZonePtr zone) {
- if (!zone) {
+InMemoryClient::addZone(ZoneFinderPtr zone_finder) {
+ if (!zone_finder) {
isc_throw(InvalidParameter,
- "Null pointer is passed to MemoryDataSrc::addZone()");
+ "Null pointer is passed to InMemoryClient::addZone()");
}
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_ADD_ZONE).
- arg(zone->getOrigin()).arg(zone->getClass().toText());
+ arg(zone_finder->getOrigin()).arg(zone_finder->getClass().toText());
- const result::Result result = impl_->zone_table.addZone(zone);
+ const result::Result result = impl_->zone_table.addZone(zone_finder);
if (result == result::SUCCESS) {
++impl_->zone_count;
}
return (result);
}
-MemoryDataSrc::FindResult
-MemoryDataSrc::findZone(const isc::dns::Name& name) const {
+InMemoryClient::FindResult
+InMemoryClient::findZone(const isc::dns::Name& name) const {
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_ZONE).arg(name);
- return (FindResult(impl_->zone_table.findZone(name).code,
- impl_->zone_table.findZone(name).zone));
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ return (FindResult(result.code, result.zone));
+}
+
+namespace {
+
+class MemoryIterator : public ZoneIterator {
+private:
+ RBTreeNodeChain<Domain> chain_;
+ Domain::const_iterator dom_iterator_;
+ const DomainTree& tree_;
+ const DomainNode* node_;
+ bool ready_;
+public:
+ MemoryIterator(const DomainTree& tree, const Name& origin) :
+ tree_(tree),
+ ready_(true)
+ {
+ // Find the first node (origin) and preserve the node chain for future
+ // searches
+ DomainTree::Result result(tree_.find<void*>(origin, &node_, chain_,
+ NULL, NULL));
+ // It can't happen that the origin is not in there
+ if (result != DomainTree::EXACTMATCH) {
+ isc_throw(Unexpected,
+ "In-memory zone corrupted, missing origin node");
+ }
+ // Initialize the iterator if there's somewhere to point to
+ if (node_ != NULL && node_->getData() != DomainPtr()) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+
+ virtual ConstRRsetPtr getNextRRset() {
+ if (!ready_) {
+ isc_throw(Unexpected, "Iterating past the zone end");
+ }
+ /*
+ * This cycle finds the first nonempty node with yet unused RRset.
+ * If it is NULL, we run out of nodes. If it is empty, it doesn't
+ * contain any RRsets. If we are at the end, just get to next one.
+ */
+ while (node_ != NULL && (node_->getData() == DomainPtr() ||
+ dom_iterator_ == node_->getData()->end())) {
+ node_ = tree_.nextNode(chain_);
+ // If there's a node, initialize the iterator and check next time
+ // if the map is empty or not
+ if (node_ != NULL && node_->getData() != NULL) {
+ dom_iterator_ = node_->getData()->begin();
+ }
+ }
+ if (node_ == NULL) {
+ // That's all, folks
+ ready_ = false;
+ return (ConstRRsetPtr());
+ }
+ // The iterator points to the next yet unused RRset now
+ ConstRRsetPtr result(dom_iterator_->second);
+ // This one is used, move it to the next time for next call
+ ++dom_iterator_;
+
+ return (result);
+ }
+};
+
+} // End of anonymous namespace
+
+ZoneIteratorPtr
+InMemoryClient::getIterator(const Name& name) const {
+ ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+ if (result.code != result::SUCCESS) {
+ isc_throw(DataSourceError, "No such zone: " + name.toText());
+ }
+
+ const InMemoryZoneFinder*
+ zone(dynamic_cast<const InMemoryZoneFinder*>(result.zone.get()));
+ if (zone == NULL) {
+ /*
+ * TODO: This can happen only during some of the tests and only as
+ * a temporary solution. This should be fixed by #1159 and then
+ * this cast and check shouldn't be necessary. We don't have a
+ * test for handling a "can not happen" condition.
+ */
+ isc_throw(Unexpected, "The zone at " + name.toText() +
+ " is not InMemoryZoneFinder");
+ }
+ return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+}
+
+ZoneUpdaterPtr
+InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+ isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
}
+
+
+namespace {
+// convenience function to add an error message to a list of those
+// (TODO: move functions like these to some util lib?)
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+/// Check if the given element exists in the map, and if it is a string
+bool
+checkConfigElementString(ConstElementPtr config, const std::string& name,
+ ElementPtr errors)
+{
+ if (!config->contains(name)) {
+ addError(errors,
+ "Config for memory backend does not contain a '"
+ "type"
+ "' value");
+ return false;
+ } else if (!config->get(name) ||
+ config->get(name)->getType() != Element::string) {
+ addError(errors, "value of " + name +
+ " in memory backend config is not a string");
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool
+checkZoneConfig(ConstElementPtr config, ElementPtr errors) {
+ bool result = true;
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Elements in memory backend's zone list must be maps");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "origin", errors)) {
+ result = false;
+ }
+ if (!checkConfigElementString(config, "file", errors)) {
+ result = false;
+ }
+ // we could add some existence/readability/parsability checks here
+ // if we want
+ }
+ return result;
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion, right now this accepts
+ * the 'old' configuration, see [TODO]
+ * So for memory datasource, we get a structure like this:
+ * { "type": string ("memory"),
+ * "class": string ("IN"/"CH"/etc),
+ * "zones": list
+ * }
+ * Zones list is a list of maps:
+ * { "origin": string,
+ * "file": string
+ * }
+ *
+ * At this moment we cannot be completely sure of the contents of the
+ * structure, so we have to do some more extensive tests than should
+ * strictly be necessary (e.g. existence and type of elements)
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for memory backend must be a map");
+ result = false;
+ } else {
+ if (!checkConfigElementString(config, "type", errors)) {
+ result = false;
+ } else {
+ if (config->get("type")->stringValue() != "memory") {
+ addError(errors,
+ "Config for memory backend is not of type \"memory\"");
+ result = false;
+ }
+ }
+ if (!checkConfigElementString(config, "class", errors)) {
+ result = false;
+ } else {
+ try {
+ RRClass rrc(config->get("class")->stringValue());
+ } catch (const isc::Exception& rrce) {
+ addError(errors,
+ "Error parsing class config for memory backend: " +
+ std::string(rrce.what()));
+ result = false;
+ }
+ }
+ if (!config->contains("zones")) {
+ addError(errors, "No 'zones' element in memory backend config");
+ result = false;
+ } else if (!config->get("zones") ||
+ config->get("zones")->getType() != Element::list) {
+ addError(errors, "'zones' element in memory backend config is not a list");
+ result = false;
+ } else {
+ BOOST_FOREACH(ConstElementPtr zone_config,
+ config->get("zones")->listValue()) {
+ if (!checkZoneConfig(zone_config, errors)) {
+ result = false;
+ }
+ }
+ }
+ }
+
+ return (result);
+ return true;
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ isc_throw(DataSourceConfigError, errors->str());
+ }
+ return (new InMemoryClient());
+}
+
+void destroyInstance(DataSourceClient* instance) {
+ delete instance;
+}
+
+
} // end of namespace datasrc
-} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 99bb4e8..cf467a2 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -17,7 +17,12 @@
#include <string>
+#include <boost/noncopyable.hpp>
+
#include <datasrc/zonetable.h>
+#include <datasrc/client.h>
+
+#include <cc/data.h>
namespace isc {
namespace dns {
@@ -27,18 +32,17 @@ class RRsetList;
namespace datasrc {
-/// A derived zone class intended to be used with the memory data source.
-class MemoryZone : public Zone {
+/// A derived zone finder class intended to be used with the memory data source.
+///
+/// Conceptually this "finder" maintains a local in-memory copy of all RRs
+/// of a single zone from some kind of source (right now it's a textual
+/// master file, but it could also be another data source with a database
+/// backend). This is why the class has methods like \c load() or \c add().
+///
+/// This class is non copyable.
+class InMemoryZoneFinder : boost::noncopyable, public ZoneFinder {
///
/// \name Constructors and Destructor.
- ///
- /// \b Note:
- /// The copy constructor and the assignment operator are intentionally
- /// defined as private, making this class non copyable.
- //@{
-private:
- MemoryZone(const MemoryZone& source);
- MemoryZone& operator=(const MemoryZone& source);
public:
/// \brief Constructor from zone parameters.
///
@@ -48,17 +52,18 @@ public:
///
/// \param rrclass The RR class of the zone.
/// \param origin The origin name of the zone.
- MemoryZone(const isc::dns::RRClass& rrclass, const isc::dns::Name& origin);
+ InMemoryZoneFinder(const isc::dns::RRClass& rrclass,
+ const isc::dns::Name& origin);
/// The destructor.
- virtual ~MemoryZone();
+ virtual ~InMemoryZoneFinder();
//@}
/// \brief Returns the origin of the zone.
- virtual const isc::dns::Name& getOrigin() const;
+ virtual isc::dns::Name getOrigin() const;
/// \brief Returns the class of the zone.
- virtual const isc::dns::RRClass& getClass() const;
+ virtual isc::dns::RRClass getClass() const;
/// \brief Looks up an RRset in the zone.
///
@@ -70,7 +75,13 @@ public:
virtual FindResult find(const isc::dns::Name& name,
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
- const FindOptions options = FIND_DEFAULT) const;
+ const FindOptions options = FIND_DEFAULT);
+
+ /// \brief Implementation of the ZoneFinder::findPreviousName method
+ ///
+ /// This one throws NotImplemented exception, as InMemory doesn't
+ /// support DNSSEC currently.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const;
/// \brief Inserts an rrset into the zone.
///
@@ -128,14 +139,14 @@ public:
/// Return the master file name of the zone
///
/// This method returns the name of the zone's master file to be loaded.
- /// The returned string will be an empty unless the zone has successfully
- /// loaded a zone.
+ /// The returned string will be empty unless the zone finder has
+ /// successfully loaded a zone.
///
/// This method should normally not throw an exception. But the creation
/// of the return string may involve a resource allocation, and if it
/// fails, the corresponding standard exception will be thrown.
///
- /// \return The name of the zone file loaded in the zone, or an empty
+ /// \return The name of the zone file loaded in the zone finder, or an empty
/// string if the zone hasn't loaded any file.
const std::string getFileName() const;
@@ -164,144 +175,147 @@ public:
/// configuration reloading is written.
void load(const std::string& filename);
- /// Exchanges the content of \c this zone with that of the given \c zone.
+ /// Exchanges the content of \c this zone finder with that of the given
+ /// \c zone_finder.
///
/// This method never throws an exception.
///
- /// \param zone Another \c MemoryZone object which is to be swapped with
- /// \c this zone.
- void swap(MemoryZone& zone);
+ /// \param zone_finder Another \c InMemoryZoneFinder object which is to
+ /// be swapped with \c this zone finder.
+ void swap(InMemoryZoneFinder& zone_finder);
private:
/// \name Hidden private data
//@{
- struct MemoryZoneImpl;
- MemoryZoneImpl* impl_;
+ struct InMemoryZoneFinderImpl;
+ InMemoryZoneFinderImpl* impl_;
//@}
+ // The friend here is for InMemoryClient::getIterator. The iterator
+ // needs to access the data inside the zone, so the InMemoryClient
+ // extracts the pointer to data and puts it into the iterator.
+ // The access is read only.
+ friend class InMemoryClient;
};
-/// \brief A data source that uses in memory dedicated backend.
+/// \brief A data source client that holds all necessary data in memory.
///
-/// The \c MemoryDataSrc class represents a data source and provides a
-/// basic interface to help DNS lookup processing. For a given domain
-/// name, its \c findZone() method searches the in memory dedicated backend
-/// for the zone that gives a longest match against that name.
+/// The \c InMemoryClient class provides an access to a conceptual data
+/// source that maintains all necessary data in a memory image, thereby
+/// allowing much faster lookups. The in memory data is a copy of some
+/// real physical source - in the current implementation a list of zones
+/// are populated as a result of \c addZone() calls; zone data is given
+/// in a standard master file (but there's a plan to use database backends
+/// as a source of the in memory data).
///
-/// The in memory dedicated backend are assumed to be of the same RR class,
-/// but the \c MemoryDataSrc class does not enforce the assumption through
+/// Although every data source client is assumed to be of the same RR class,
+/// the \c InMemoryClient class does not enforce the assumption through
/// its interface.
/// For example, the \c addZone() method does not check if the new zone is of
-/// the same RR class as that of the others already in the dedicated backend.
+/// the same RR class as that of the others already in memory.
/// It is caller's responsibility to ensure this assumption.
///
/// <b>Notes to developer:</b>
///
-/// For now, we don't make it a derived class of AbstractDataSrc because the
-/// interface is so different (we'll eventually consider this as part of the
-/// generalization work).
-///
/// The addZone() method takes a (Boost) shared pointer because it would be
/// inconvenient to require the caller to maintain the ownership of zones,
/// while it wouldn't be safe to delete unnecessary zones inside the dedicated
/// backend.
///
-/// The findZone() method takes a domain name and returns the best matching \c
-/// MemoryZone in the form of (Boost) shared pointer, so that it can provide
-/// the general interface for all data sources.
-class MemoryDataSrc {
+/// The findZone() method takes a domain name and returns the best matching
+/// \c InMemoryZoneFinder in the form of (Boost) shared pointer, so that it can
+/// provide the general interface for all data sources.
+class InMemoryClient : public DataSourceClient {
public:
- /// \brief A helper structure to represent the search result of
- /// <code>MemoryDataSrc::find()</code>.
- ///
- /// This is a straightforward pair of the result code and a share pointer
- /// to the found zone to represent the result of \c find().
- /// We use this in order to avoid overloading the return value for both
- /// the result code ("success" or "not found") and the found object,
- /// i.e., avoid using \c NULL to mean "not found", etc.
- ///
- /// This is a simple value class with no internal state, so for
- /// convenience we allow the applications to refer to the members
- /// directly.
- ///
- /// See the description of \c find() for the semantics of the member
- /// variables.
- struct FindResult {
- FindResult(result::Result param_code, const ZonePtr param_zone) :
- code(param_code), zone(param_zone)
- {}
- const result::Result code;
- const ZonePtr zone;
- };
-
///
/// \name Constructors and Destructor.
///
- /// \b Note:
- /// The copy constructor and the assignment operator are intentionally
- /// defined as private, making this class non copyable.
//@{
-private:
- MemoryDataSrc(const MemoryDataSrc& source);
- MemoryDataSrc& operator=(const MemoryDataSrc& source);
-public:
/// Default constructor.
///
/// This constructor internally involves resource allocation, and if
/// it fails, a corresponding standard exception will be thrown.
/// It never throws an exception otherwise.
- MemoryDataSrc();
+ InMemoryClient();
/// The destructor.
- ~MemoryDataSrc();
+ ~InMemoryClient();
//@}
- /// Return the number of zones stored in the data source.
+ /// Return the number of zones stored in the client.
///
/// This method never throws an exception.
///
- /// \return The number of zones stored in the data source.
+ /// \return The number of zones stored in the client.
unsigned int getZoneCount() const;
- /// Add a \c Zone to the \c MemoryDataSrc.
+ /// Add a zone (in the form of \c ZoneFinder) to the \c InMemoryClient.
///
- /// \c Zone must not be associated with a NULL pointer; otherwise
+ /// \c zone_finder must not be associated with a NULL pointer; otherwise
/// an exception of class \c InvalidParameter will be thrown.
/// If internal resource allocation fails, a corresponding standard
/// exception will be thrown.
/// This method never throws an exception otherwise.
///
- /// \param zone A \c Zone object to be added.
- /// \return \c result::SUCCESS If the zone is successfully
- /// added to the memory data source.
+ /// \param zone_finder A \c ZoneFinder object to be added.
+ /// \return \c result::SUCCESS If the zone_finder is successfully
+ /// added to the client.
/// \return \c result::EXIST The memory data source already
/// stores a zone that has the same origin.
- result::Result addZone(ZonePtr zone);
-
- /// Find a \c Zone that best matches the given name in the \c MemoryDataSrc.
- ///
- /// It searches the internal storage for a \c Zone that gives the
- /// longest match against \c name, and returns the result in the
- /// form of a \c FindResult object as follows:
- /// - \c code: The result code of the operation.
- /// - \c result::SUCCESS: A zone that gives an exact match
- // is found
- /// - \c result::PARTIALMATCH: A zone whose origin is a
- // super domain of \c name is found (but there is no exact match)
- /// - \c result::NOTFOUND: For all other cases.
- /// - \c zone: A "Boost" shared pointer to the found \c Zone object if one
- // is found; otherwise \c NULL.
- ///
- /// This method never throws an exception.
+ result::Result addZone(ZoneFinderPtr zone_finder);
+
+ /// Returns a \c ZoneFinder for a zone_finder that best matches the given
+ /// name.
///
- /// \param name A domain name for which the search is performed.
- /// \return A \c FindResult object enclosing the search result (see above).
- FindResult findZone(const isc::dns::Name& name) const;
+ /// This derived version of the method never throws an exception.
+ /// For other details see \c DataSourceClient::findZone().
+ virtual FindResult findZone(const isc::dns::Name& name) const;
+
+ /// \brief Implementation of the getIterator method
+ virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name) const;
+
+ /// In-memory data source is read-only, so this derived method will
+ /// result in a NotImplemented exception.
+ ///
+ /// \note We plan to use a database-based data source as a backend
+ /// persistent storage for an in-memory data source. When it's
+ /// implemented we may also want to allow the user of the in-memory client
+ /// to update via its updater (this may or may not be a good idea and
+ /// is subject to further discussions).
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+ bool replace) const;
private:
- class MemoryDataSrcImpl;
- MemoryDataSrcImpl* impl_;
+ // TODO: Do we still need the PImpl if nobody should manipulate this class
+ // directly any more (it should be handled through DataSourceClient)?
+ class InMemoryClientImpl;
+ InMemoryClientImpl* impl_;
};
+
+/// \brief Creates an instance of the Memory datasource client
+///
+/// Currently the configuration passed here must be a MapElement, formed as
+/// follows:
+/// \code
+/// { "type": string ("memory"),
+/// "class": string ("IN"/"CH"/etc),
+/// "zones": list
+/// }
+/// Zones list is a list of maps:
+/// { "origin": string,
+/// "file": string
+/// }
+/// \endcode
+/// (i.e. the configuration that was used prior to the datasource refactor)
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+
}
}
#endif // __DATA_SOURCE_MEMORY_H
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index 03a6967..ccdfa48 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -704,9 +704,9 @@ public:
/// \brief Find with callback and node chain.
///
/// This version of \c find() is specifically designed for the backend
- /// of the \c MemoryZone class, and implements all necessary features
- /// for that purpose. Other applications shouldn't need these additional
- /// features, and should normally use the simpler versions.
+ /// of the \c InMemoryZoneFinder class, and implements all necessary
+ /// features for that purpose. Other applications shouldn't need these
+ /// additional features, and should normally use the simpler versions.
///
/// This version of \c find() calls the callback whenever traversing (on
/// the way from root down the tree) a marked node on the way down through
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..3607227
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,779 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <string>
+#include <vector>
+
+#include <boost/foreach.hpp>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+#include <util/filename.h>
+
+using namespace std;
+using namespace isc::data;
+
+#define SQLITE_SCHEMA_VERSION 1
+
+#define CONFIG_ITEM_DATABASE_FILE "database_file"
+
+namespace isc {
+namespace datasrc {
+
+// The following enum and char* array define the SQL statements commonly
+// used in this implementation. Corresponding prepared statements (of
+// type sqlite3_stmt*) are maintained in the statements_ array of the
+// SQLite3Parameters structure.
+
+enum StatementID {
+ ZONE = 0,
+ ANY = 1,
+ ANY_SUB = 2,
+ BEGIN = 3,
+ COMMIT = 4,
+ ROLLBACK = 5,
+ DEL_ZONE_RECORDS = 6,
+ ADD_RECORD = 7,
+ DEL_RECORD = 8,
+ ITERATE = 9,
+ FIND_PREVIOUS = 10,
+ NUM_STATEMENTS = 11
+};
+
+const char* const text_statements[NUM_STATEMENTS] = {
+ // note for ANY and ITERATE: the order of the SELECT values is
+ // specifically chosen to match the enum values in RecordColumns
+ "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
+ "SELECT rdtype, ttl, sigtype, rdata FROM records " // ANY
+ "WHERE zone_id=?1 AND name=?2",
+ "SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
+ "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+ "BEGIN", // BEGIN
+ "COMMIT", // COMMIT
+ "ROLLBACK", // ROLLBACK
+ "DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
+ "INSERT INTO records " // ADD_RECORD
+ "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+ "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ "DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
+ "AND rdtype=?3 AND rdata=?4",
+ "SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
+ "WHERE zone_id = ?1 ORDER BY name, rdtype",
+ /*
+ * This one looks for previous name with NSEC record. It is done by
+ * using the reversed name. The NSEC is checked because we need to
+ * skip glue data, which don't have the NSEC.
+ */
+ "SELECT name FROM records " // FIND_PREVIOUS
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1"
+};
+
+struct SQLite3Parameters {
+ SQLite3Parameters() :
+ db_(NULL), version_(-1), updating_zone(false), updated_zone_id(-1)
+ {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ statements_[i] = NULL;
+ }
+ }
+
+ sqlite3* db_;
+ int version_;
+ sqlite3_stmt* statements_[NUM_STATEMENTS];
+ bool updating_zone; // whether or not updating the zone
+ int updated_zone_id; // valid only when updating_zone is true
+};
+
+// This is a helper class to encapsulate the code logic of executing
+// a specific SQLite3 statement, ensuring the corresponding prepared
+// statement is always reset whether the execution is completed successfully
+// or it results in an exception.
+// Note that an object of this class is intended to be used for "ephemeral"
+// statement, which is completed with a single "step" (normally within a
+// single call to an SQLite3Database method). In particular, it cannot be
+// used for "SELECT" variants, which generally expect multiple matching rows.
+class StatementProcessor {
+public:
+ // desc will be used on failure in the what() message of the resulting
+ // DataSourceError exception.
+ StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
+ const char* desc) :
+ dbparameters_(dbparameters), stmt_id_(stmt_id), desc_(desc)
+ {
+ sqlite3_clear_bindings(dbparameters_.statements_[stmt_id_]);
+ }
+
+ ~StatementProcessor() {
+ sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ }
+
+ void exec() {
+ if (sqlite3_step(dbparameters_.statements_[stmt_id_]) != SQLITE_DONE) {
+ sqlite3_reset(dbparameters_.statements_[stmt_id_]);
+ isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
+ sqlite3_errmsg(dbparameters_.db_));
+ }
+ }
+
+private:
+ SQLite3Parameters& dbparameters_;
+ const StatementID stmt_id_;
+ const char* const desc_;
+};
+
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+ const isc::dns::RRClass& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ filename_(filename),
+ class_(rrclass.toText()),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+ const string& rrclass) :
+ dbparameters_(new SQLite3Parameters),
+ filename_(filename),
+ class_(rrclass),
+ database_name_("sqlite3_" +
+ isc::util::Filename(filename).nameAndExtension())
+{
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+ open(filename);
+}
+
+boost::shared_ptr<DatabaseAccessor>
+SQLite3Accessor::clone() {
+ return (boost::shared_ptr<DatabaseAccessor>(new SQLite3Accessor(filename_,
+ class_)));
+}
+
+namespace {
+
+// This is a helper class to initialize a Sqlite3 DB safely. An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and release them in the destructor. Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception free manner. This way, the main code
+// for the initialization can be exception safe, and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+ ~Initializer() {
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ sqlite3_finalize(params_.statements_[i]);
+ }
+
+ if (params_.db_ != NULL) {
+ sqlite3_close(params_.db_);
+ }
+ }
+ void move(SQLite3Parameters* dst) {
+ *dst = params_;
+ params_ = SQLite3Parameters(); // clear everything
+ }
+ SQLite3Parameters params_;
+};
+
+const char* const SCHEMA_LIST[] = {
+ "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+ "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+ "name STRING NOT NULL COLLATE NOCASE, "
+ "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+ "CREATE INDEX zones_byname ON zones (name)",
+ "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+ "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+ "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX records_byname ON records (name)",
+ "CREATE INDEX records_byrname ON records (rname)",
+ "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+ "hash STRING NOT NULL COLLATE NOCASE, "
+ "owner STRING NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+ "rdata STRING NOT NULL)",
+ "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ NULL
+};
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+ sqlite3_stmt* prepared = NULL;
+ if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+ statement);
+ }
+ return (prepared);
+}
+
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void doSleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int checkSchemaVersion(sqlite3* db) {
+ sqlite3_stmt* prepared = NULL;
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, "SELECT version FROM schema_version",
+ -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(SQLite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(SQLite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version == -1) {
+ for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+ if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error,
+ "Failed to set up schema " << SCHEMA_LIST[i]);
+ }
+ }
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
+ }
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ int schema_version = checkSchemaVersion(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
+ }
+ initializer->params_.version_ = schema_version;
+
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ initializer->params_.statements_[i] = prepare(db, text_statements[i]);
+ }
+}
+
+}
+
// Open the database file and set up all internal state (schema check,
// prepared statements). On success the new connection is moved into
// dbparameters_; on failure nothing is committed to this object.
//
// Throws DataSourceError if a connection is already open, SQLite3Error
// if the file cannot be opened or the schema setup fails.
void
SQLite3Accessor::open(const std::string& name) {
    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
    if (dbparameters_->db_ != NULL) {
        // There shouldn't be a way to trigger this anyway
        isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
    }

    // Build everything into a temporary holder first; initializer.move()
    // only transfers the state once the whole setup has succeeded.
    Initializer initializer;

    // sqlite3_open() returns 0 (SQLITE_OK) on success.
    if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
        isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
    }

    checkAndSetupSchema(&initializer);
    initializer.move(dbparameters_.get());
}
+
// Destructor: closes the connection, but only if open() ever succeeded
// (close() throws on an unopened connection, which must not happen here).
SQLite3Accessor::~SQLite3Accessor() {
    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
    if (dbparameters_->db_ != NULL) {
        close();
    }
}
+
+void
+SQLite3Accessor::close(void) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+ if (dbparameters_->db_ == NULL) {
+ isc_throw(DataSourceError,
+ "SQLite data source is being closed before open");
+ }
+
+ // XXX: sqlite3_finalize() could fail. What should we do in that case?
+ for (int i = 0; i < NUM_STATEMENTS; ++i) {
+ sqlite3_finalize(dbparameters_->statements_[i]);
+ dbparameters_->statements_[i] = NULL;
+ }
+
+ sqlite3_close(dbparameters_->db_);
+ dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Accessor::getZone(const std::string& name) const {
+ int rc;
+ sqlite3_stmt* const stmt = dbparameters_->statements_[ZONE];
+
+ // Take the statement (simple SELECT id FROM zones WHERE...)
+ // and prepare it (bind the parameters to it)
+ sqlite3_reset(stmt);
+ rc = sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << name <<
+ " to SQL statement (zone)");
+ }
+ rc = sqlite3_bind_text(stmt, 2, class_.c_str(), -1, SQLITE_STATIC);
+ if (rc != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind " << class_ <<
+ " to SQL statement (zone)");
+ }
+
+ // Get the data there and see if it found anything
+ rc = sqlite3_step(stmt);
+ if (rc == SQLITE_ROW) {
+ const int zone_id = sqlite3_column_int(stmt, 0);
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(true, zone_id));
+ } else if (rc == SQLITE_DONE) {
+ // Free resources
+ sqlite3_reset(stmt);
+ return (pair<bool, int>(false, 0));
+ }
+
+ sqlite3_reset(stmt);
+ isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ // Compilers might not realize isc_throw always throws
+ return (std::pair<bool, int>(false, 0));
+}
+
+namespace {
+
+// Conversion to plain char
+const char*
+convertToPlainChar(const unsigned char* ucp, sqlite3 *db) {
+ if (ucp == NULL) {
+ // The field can really be NULL, in which case we return an
+ // empty string, or sqlite may have run out of memory, in
+ // which case we raise an error
+ if (sqlite3_errcode(db) == SQLITE_NOMEM) {
+ isc_throw(DataSourceError,
+ "Sqlite3 backend encountered a memory allocation "
+ "error in sqlite3_column_text()");
+ } else {
+ return ("");
+ }
+ }
+ const void* p = ucp;
+ return (static_cast<const char*>(p));
+}
+
+}
// Iterator over records in the database. It owns a freshly prepared
// statement (not one of the accessor's cached ones) and steps through it
// on each getNext() call, finalizing it when exhausted or destroyed.
class SQLite3Accessor::Context : public DatabaseAccessor::IteratorContext {
public:
    // Construct an iterator for all records. When constructed this
    // way, the getNext() call will copy all fields
    Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id) :
        iterator_type_(ITT_ALL),
        accessor_(accessor),
        statement_(NULL),
        name_("")
    {
        // We create the statement now and then just keep getting data from it
        statement_ = prepare(accessor->dbparameters_->db_,
                             text_statements[ITERATE]);
        bindZoneId(id);
    }

    // Construct an iterator for records with a specific name. When constructed
    // this way, the getNext() call will copy all fields except name
    Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id,
            const std::string& name, bool subdomains) :
        iterator_type_(ITT_NAME),
        accessor_(accessor),
        statement_(NULL),
        name_(name)

    {
        // We create the statement now and then just keep getting data from it
        statement_ = prepare(accessor->dbparameters_->db_,
                             subdomains ? text_statements[ANY_SUB] :
                             text_statements[ANY]);
        bindZoneId(id);
        bindName(name_);
    }

    // Fill 'data' with the next record's columns and return true, or
    // return false when the result set is exhausted.
    bool getNext(std::string (&data)[COLUMN_COUNT]) {
        // If there's another row, get it
        // If finalize has been called (e.g. when previous getNext() got
        // SQLITE_DONE), directly return false
        if (statement_ == NULL) {
            return false;
        }
        const int rc(sqlite3_step(statement_));
        if (rc == SQLITE_ROW) {
            // For both types, we copy the first four columns
            copyColumn(data, TYPE_COLUMN);
            copyColumn(data, TTL_COLUMN);
            copyColumn(data, SIGTYPE_COLUMN);
            copyColumn(data, RDATA_COLUMN);
            // Only copy Name if we are iterating over every record
            if (iterator_type_ == ITT_ALL) {
                copyColumn(data, NAME_COLUMN);
            }
            return (true);
        } else if (rc != SQLITE_DONE) {
            isc_throw(DataSourceError,
                      "Unexpected failure in sqlite3_step: " <<
                      sqlite3_errmsg(accessor_->dbparameters_->db_));
        }
        // SQLITE_DONE: release the statement now; subsequent calls hit the
        // NULL check above and return false cheaply.
        finalize();
        return (false);
    }

    virtual ~Context() {
        finalize();
    }

private:
    // Depending on which constructor is called, behaviour is slightly
    // different. We keep track of what to do with the iterator type
    // See description of getNext() and the constructors
    enum IteratorType {
        ITT_ALL,
        ITT_NAME
    };

    // Copy one column of the current row into 'data', converting a NULL
    // column to "" (or throwing on sqlite memory exhaustion).
    void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
        data[column] = convertToPlainChar(sqlite3_column_text(statement_,
                                                              column),
                                          accessor_->dbparameters_->db_);
    }

    // Bind the zone id as SQL parameter 1; finalizes the statement before
    // throwing so it is not leaked.
    void bindZoneId(const int zone_id) {
        if (sqlite3_bind_int(statement_, 1, zone_id) != SQLITE_OK) {
            finalize();
            isc_throw(SQLite3Error, "Could not bind int " << zone_id <<
                      " to SQL statement: " <<
                      sqlite3_errmsg(accessor_->dbparameters_->db_));
        }
    }

    // Bind the record name as SQL parameter 2; same cleanup-on-throw
    // behaviour as bindZoneId(). The error message is fetched before
    // finalize() since finalizing may affect sqlite3_errmsg().
    void bindName(const std::string& name) {
        if (sqlite3_bind_text(statement_, 2, name.c_str(), -1,
                              SQLITE_TRANSIENT) != SQLITE_OK) {
            const char* errmsg = sqlite3_errmsg(accessor_->dbparameters_->db_);
            finalize();
            isc_throw(SQLite3Error, "Could not bind text '" << name <<
                      "' to SQL statement: " << errmsg);
        }
    }

    // Idempotent: sqlite3_finalize(NULL) is harmless and statement_ is
    // nulled so getNext() knows the iterator is exhausted.
    void finalize() {
        sqlite3_finalize(statement_);
        statement_ = NULL;
    }

    const IteratorType iterator_type_;
    // Keeps the accessor (and therefore the db handle) alive while this
    // iterator exists.
    boost::shared_ptr<const SQLite3Accessor> accessor_;
    sqlite3_stmt *statement_;
    const std::string name_;
};
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getRecords(const std::string& name, int id,
+ bool subdomains) const
+{
+ return (IteratorContextPtr(new Context(shared_from_this(), id, name,
+ subdomains)));
+}
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getAllRecords(int id) const {
+ return (IteratorContextPtr(new Context(shared_from_this(), id)));
+}
+
// Begin a transactional update of one zone. Returns the same
// (found, zone id) pair as getZone(); a transaction is only started when
// the zone exists. With replace == true, all existing records of the
// zone are deleted inside the transaction first.
//
// Throws DataSourceError if an update is already in progress on this
// accessor, or if the record deletion fails (after rolling back).
pair<bool, int>
SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
    if (dbparameters_->updating_zone) {
        isc_throw(DataSourceError,
                  "duplicate zone update on SQLite3 data source");
    }

    // Look the zone up first so we can bail out before opening any
    // transaction when it does not exist.
    const pair<bool, int> zone_info(getZone(zone_name));
    if (!zone_info.first) {
        return (zone_info);
    }

    StatementProcessor(*dbparameters_, BEGIN,
                       "start an SQLite3 transaction").exec();

    if (replace) {
        try {
            StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
                                            "delete zone records");

            sqlite3_clear_bindings(
                dbparameters_->statements_[DEL_ZONE_RECORDS]);
            if (sqlite3_bind_int(dbparameters_->statements_[DEL_ZONE_RECORDS],
                                 1, zone_info.second) != SQLITE_OK) {
                isc_throw(DataSourceError,
                          "failed to bind SQLite3 parameter: " <<
                          sqlite3_errmsg(dbparameters_->db_));
            }

            delzone_exec.exec();
        } catch (const DataSourceError&) {
            // Once we start a transaction, if something unexpected happens
            // we need to rollback the transaction so that a subsequent update
            // is still possible with this accessor.
            StatementProcessor(*dbparameters_, ROLLBACK,
                               "rollback an SQLite3 transaction").exec();
            throw;
        }
    }

    // Record the in-progress update; commitUpdateZone() or
    // rollbackUpdateZone() clears this state again.
    dbparameters_->updating_zone = true;
    dbparameters_->updated_zone_id = zone_info.second;

    return (zone_info);
}
+
// Commit the update started by startUpdateZone() and clear the
// in-progress state so another update can begin.
//
// Throws DataSourceError when no update transaction is active.
void
SQLite3Accessor::commitUpdateZone() {
    if (!dbparameters_->updating_zone) {
        isc_throw(DataSourceError, "committing zone update on SQLite3 "
                  "data source without transaction");
    }

    StatementProcessor(*dbparameters_, COMMIT,
                       "commit an SQLite3 transaction").exec();
    dbparameters_->updating_zone = false;
    dbparameters_->updated_zone_id = -1;
}
+
// Abort the update started by startUpdateZone(), discarding all changes,
// and clear the in-progress state.
//
// Throws DataSourceError when no update transaction is active.
void
SQLite3Accessor::rollbackUpdateZone() {
    if (!dbparameters_->updating_zone) {
        isc_throw(DataSourceError, "rolling back zone update on SQLite3 "
                  "data source without transaction");
    }

    StatementProcessor(*dbparameters_, ROLLBACK,
                       "rollback an SQLite3 transaction").exec();
    dbparameters_->updating_zone = false;
    dbparameters_->updated_zone_id = -1;
}
+
+namespace {
+// Commonly used code sequence for adding/deleting record
+template <typename COLUMNS_TYPE>
+void
+doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
+ COLUMNS_TYPE update_params, const char* exec_desc)
+{
+ sqlite3_stmt* const stmt = dbparams.statements_[stmt_id];
+ StatementProcessor executer(dbparams, stmt_id, exec_desc);
+
+ int param_id = 0;
+ if (sqlite3_bind_int(stmt, ++param_id, dbparams.updated_zone_id)
+ != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ const size_t column_count =
+ sizeof(update_params) / sizeof(update_params[0]);
+ for (int i = 0; i < column_count; ++i) {
+ if (sqlite3_bind_text(stmt, ++param_id, update_params[i].c_str(), -1,
+ SQLITE_TRANSIENT) != SQLITE_OK) {
+ isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+ sqlite3_errmsg(dbparams.db_));
+ }
+ }
+ executer.exec();
+}
+}
+
// Add one record to the zone currently being updated. Must be called
// between startUpdateZone() and commitUpdateZone()/rollbackUpdateZone().
//
// Throws DataSourceError when no update transaction is active or the
// underlying SQL execution fails.
void
SQLite3Accessor::addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
    if (!dbparameters_->updating_zone) {
        isc_throw(DataSourceError, "adding record to SQLite3 "
                  "data source without transaction");
    }
    doUpdate<const string (&)[DatabaseAccessor::ADD_COLUMN_COUNT]>(
        *dbparameters_, ADD_RECORD, columns, "add record to zone");
}
+
// Delete one record from the zone currently being updated. Must be
// called between startUpdateZone() and commit/rollback.
//
// Throws DataSourceError when no update transaction is active or the
// underlying SQL execution fails.
void
SQLite3Accessor::deleteRecordInZone(const string (&params)[DEL_PARAM_COUNT]) {
    if (!dbparameters_->updating_zone) {
        isc_throw(DataSourceError, "deleting record in SQLite3 "
                  "data source without transaction");
    }
    doUpdate<const string (&)[DatabaseAccessor::DEL_PARAM_COUNT]>(
        *dbparameters_, DEL_RECORD, params, "delete record from zone");
}
+
// Find the name preceding 'rname' in the given zone via the precompiled
// FIND_PREVIOUS statement (used for NSEC processing).
// NOTE(review): 'rname' is presumably the reversed/canonical form of the
// name expected by the SQL — confirm against the FIND_PREVIOUS statement
// text and its callers.
std::string
SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
    const
{
    // Reuse the cached statement: rewind it and drop any old bindings.
    sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
    sqlite3_clear_bindings(dbparameters_->statements_[FIND_PREVIOUS]);

    if (sqlite3_bind_int(dbparameters_->statements_[FIND_PREVIOUS], 1,
                         zone_id) != SQLITE_OK) {
        isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
                  " to SQL statement (find previous): " <<
                  sqlite3_errmsg(dbparameters_->db_));
    }
    // SQLITE_STATIC is safe here: rname outlives the step/reset below.
    if (sqlite3_bind_text(dbparameters_->statements_[FIND_PREVIOUS], 2,
                          rname.c_str(), -1, SQLITE_STATIC) != SQLITE_OK) {
        isc_throw(SQLite3Error, "Could not bind name " << rname <<
                  " to SQL statement (find previous): " <<
                  sqlite3_errmsg(dbparameters_->db_));
    }

    std::string result;
    const int rc = sqlite3_step(dbparameters_->statements_[FIND_PREVIOUS]);
    if (rc == SQLITE_ROW) {
        // We found it
        result = convertToPlainChar(sqlite3_column_text(dbparameters_->
            statements_[FIND_PREVIOUS], 0), dbparameters_->db_);
    }
    // Reset before the error checks so the statement is left clean even
    // on the throwing paths below.
    sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);

    if (rc == SQLITE_DONE) {
        // No NSEC records here, this DB doesn't support DNSSEC or
        // we asked before the apex
        isc_throw(isc::NotImplemented, "The zone doesn't support DNSSEC or "
                  "query before apex");
    }

    if (rc != SQLITE_ROW && rc != SQLITE_DONE) {
        // Some kind of error
        isc_throw(SQLite3Error, "Could not get data for previous name");
    }

    return (result);
}
+
+namespace {
+void
+addError(ElementPtr errors, const std::string& error) {
+ if (errors != ElementPtr() && errors->getType() == Element::list) {
+ errors->add(Element::create(error));
+ }
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+ /* Specific configuration is under discussion, right now this accepts
+ * the 'old' configuration, see header file
+ */
+ bool result = true;
+
+ if (!config || config->getType() != Element::map) {
+ addError(errors, "Base config for SQlite3 backend must be a map");
+ result = false;
+ } else {
+ if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
+ addError(errors,
+ "Config for SQlite3 backend does not contain a '"
+ CONFIG_ITEM_DATABASE_FILE
+ "' value");
+ result = false;
+ } else if (!config->get(CONFIG_ITEM_DATABASE_FILE) ||
+ config->get(CONFIG_ITEM_DATABASE_FILE)->getType() !=
+ Element::string) {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is not a string");
+ result = false;
+ } else if (config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue() ==
+ "") {
+ addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+ " in SQLite3 backend is empty");
+ result = false;
+ }
+ }
+
+ return (result);
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config) {
+ ElementPtr errors(Element::createList());
+ if (!checkConfig(config, errors)) {
+ isc_throw(DataSourceConfigError, errors->str());
+ }
+ std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(dbfile, isc::dns::RRClass::IN()));
+ return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
+}
+
// Counterpart of createInstance(): releases a client obtained from it.
void destroyInstance(DataSourceClient* instance) {
    delete instance;
}
+
+} // end of namespace datasrc
+} // end of namespace isc
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..3286f3b
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,215 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <string>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
/**
 * \brief Low-level database error
 *
 * This exception is thrown when the SQLite library complains about something.
 * It might mean corrupt database file, invalid request or that something is
 * rotten in the library.
 */
class SQLite3Error : public Exception {
public:
    // Standard isc::Exception constructor: source location plus message.
    SQLite3Error(const char* file, size_t line, const char* what) :
        isc::Exception(file, line, what) {}
};
+
+struct SQLite3Parameters;
+
/**
 * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
 *
 * This opens one database file with our schema and serves data from there.
 * According to the design, it doesn't interpret the data in any way, it just
 * provides unified access to the DB.
 *
 * NOTE(review): it derives from enable_shared_from_this, and the iterator
 * methods call shared_from_this() — instances therefore must be managed by
 * a boost::shared_ptr for getRecords()/getAllRecords() to work; confirm
 * with the factory/caller code.
 */
class SQLite3Accessor : public DatabaseAccessor,
    public boost::enable_shared_from_this<SQLite3Accessor> {
public:
    /**
     * \brief Constructor
     *
     * This opens the database and becomes ready to serve data from there.
     *
     * \exception SQLite3Error will be thrown if the given database file
     * doesn't work (it is broken, doesn't exist and can't be created, etc).
     *
     * \param filename The database file to be used.
     * \param rrclass Which class of data it should serve (while the database
     *     file can contain multiple classes of data, single database can
     *     provide only one class).
     */
    SQLite3Accessor(const std::string& filename,
                    const isc::dns::RRClass& rrclass);

    /**
     * \brief Constructor
     *
     * Same as the other version, but takes rrclass as a bare string.
     * we should obsolete the other version and unify the constructor to
     * this version; the SQLite3Accessor is expected to be "dumb" and
     * shouldn't care about DNS specific information such as RRClass.
     */
    SQLite3Accessor(const std::string& filename, const std::string& rrclass);

    /**
     * \brief Destructor
     *
     * Closes the database.
     */
    ~SQLite3Accessor();

    /// This implementation internally opens a new sqlite3 database for the
    /// same file name specified in the constructor of the original accessor.
    virtual boost::shared_ptr<DatabaseAccessor> clone();

    /**
     * \brief Look up a zone
     *
     * This implements the getZone from DatabaseAccessor and looks up a zone
     * in the data. It looks for a zone with the exact given origin and class
     * passed to the constructor.
     *
     * \exception SQLite3Error if something about the database is broken.
     *
     * \param name The (fully qualified) domain name of zone to look up
     * \return The pair contains if the lookup was successful in the first
     *     element and the zone id in the second if it was.
     */
    virtual std::pair<bool, int> getZone(const std::string& name) const;

    /** \brief Look up all resource records for a name
     *
     * This implements the getRecords() method from DatabaseAccessor
     *
     * \exception SQLite3Error if there is an sqlite3 error when performing
     *     the query
     *
     * \param name the name to look up
     * \param id the zone id, as returned by getZone()
     * \param subdomains Match subdomains instead of the name.
     * \return Iterator that contains all records with the given name
     */
    virtual IteratorContextPtr getRecords(const std::string& name,
                                          int id,
                                          bool subdomains = false) const;

    /** \brief Look up all resource records for a zone
     *
     * This implements the getRecords() method from DatabaseAccessor
     *
     * \exception SQLite3Error if there is an sqlite3 error when performing
     *     the query
     *
     * \param id the zone id, as returned by getZone()
     * \return Iterator that contains all records in the given zone
     */
    virtual IteratorContextPtr getAllRecords(int id) const;

    virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
                                                 bool replace);

    /// \note we are quite impatient here: it's quite possible that the COMMIT
    /// fails due to other process performing SELECT on the same database
    /// (consider the case where COMMIT is done by xfrin or dynamic update
    /// server while an authoritative server is busy reading the DB).
    /// In a future version we should probably need to introduce some retry
    /// attempt and/or increase timeout before giving up the COMMIT, even
    /// if it still doesn't guarantee 100% success. Right now this
    /// implementation throws a \c DataSourceError exception in such a case.
    virtual void commitUpdateZone();

    /// \note In SQLite3 rollback can fail if there's another unfinished
    /// statement is performed for the same database structure.
    /// Although it's not expected to happen in our expected usage, it's not
    /// guaranteed to be prevented at the API level. If it ever happens, this
    /// method throws a \c DataSourceError exception. It should be
    /// considered a bug of the higher level application program.
    virtual void rollbackUpdateZone();

    virtual void addRecordToZone(
        const std::string (&columns)[ADD_COLUMN_COUNT]);

    virtual void deleteRecordInZone(
        const std::string (&params)[DEL_PARAM_COUNT]);

    /// The SQLite3 implementation of this method returns a string starting
    /// with a fixed prefix of "sqlite3_" followed by the DB file name
    /// removing any path name. For example, for the DB file
    /// /somewhere/in/the/system/bind10.sqlite3, this method will return
    /// "sqlite3_bind10.sqlite3".
    virtual const std::string& getDBName() const { return (database_name_); }

    /// \brief Concrete implementation of the pure virtual method
    virtual std::string findPreviousName(int zone_id, const std::string& rname)
        const;

private:
    /// \brief Private database data
    boost::scoped_ptr<SQLite3Parameters> dbparameters_;
    /// \brief The filename of the DB (necessary for clone())
    const std::string filename_;
    /// \brief The class for which the queries are done
    const std::string class_;
    /// \brief Opens the database
    void open(const std::string& filename);
    /// \brief Closes the database
    void close();
    /// \brief SQLite3 implementation of IteratorContext
    class Context;
    friend class Context;
    const std::string database_name_;
};
+
/// \brief Creates an instance of the SQLite3 datasource client
+///
+/// Currently the configuration passed here must be a MapElement, containing
+/// one item called "database_file", whose value is a string
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+}
+}
+
#endif // __DATASRC_SQLITE3_ACCESSOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/sqlite3_datasrc.cc b/src/lib/datasrc/sqlite3_datasrc.cc
index 13d98ed..03b057c 100644
--- a/src/lib/datasrc/sqlite3_datasrc.cc
+++ b/src/lib/datasrc/sqlite3_datasrc.cc
@@ -26,6 +26,8 @@
#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#define SQLITE_SCHEMA_VERSION 1
+
using namespace std;
using namespace isc::dns;
using namespace isc::dns::rdata;
@@ -77,6 +79,8 @@ const char* const SCHEMA_LIST[] = {
NULL
};
+const char* const q_version_str = "SELECT version FROM schema_version";
+
const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1";
const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
@@ -254,7 +258,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
}
break;
}
-
+
sqlite3_reset(query);
sqlite3_clear_bindings(query);
@@ -295,7 +299,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
//
sqlite3_reset(dbparameters->q_count_);
sqlite3_clear_bindings(dbparameters->q_count_);
-
+
rc = sqlite3_bind_int(dbparameters->q_count_, 1, zone_id);
if (rc != SQLITE_OK) {
isc_throw(Sqlite3Error, "Could not bind zone ID " << zone_id <<
@@ -356,7 +360,8 @@ Sqlite3DataSrc::findClosestEnclosure(DataSrcMatch& match) const {
unsigned int position;
if (findClosest(match.getName(), &position) == -1) {
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_SQLITE_ENCLOSURE_NOTFOUND);
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_SQLITE_ENCLOSURE_NOT_FOUND)
+ .arg(match.getName());
return;
}
@@ -652,29 +657,90 @@ prepare(sqlite3* const db, const char* const statement) {
return (prepared);
}
-void
-checkAndSetupSchema(Sqlite3Initializer* initializer) {
- sqlite3* const db = initializer->params_.db_;
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void do_sleep() {
+ struct timespec req;
+ req.tv_sec = 0;
+ req.tv_nsec = 100000000;
+ nanosleep(&req, NULL);
+}
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int check_schema_version(sqlite3* db) {
sqlite3_stmt* prepared = NULL;
- if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
- &prepared, NULL) == SQLITE_OK &&
- sqlite3_step(prepared) == SQLITE_ROW) {
- initializer->params_.version_ = sqlite3_column_int(prepared, 0);
- sqlite3_finalize(prepared);
- } else {
- logger.info(DATASRC_SQLITE_SETUP);
- if (prepared != NULL) {
- sqlite3_finalize(prepared);
+ // At this point in time, the database might be exclusively locked, in
+ // which case even prepare() will return BUSY, so we may need to try a
+ // few times
+ for (size_t i = 0; i < 50; ++i) {
+ int rc = sqlite3_prepare_v2(db, q_version_str, -1, &prepared, NULL);
+ if (rc == SQLITE_ERROR) {
+ // this is the error that is returned when the table does not
+ // exist
+ return (-1);
+ } else if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(Sqlite3Error, "Unable to prepare version query: "
+ << rc << " " << sqlite3_errmsg(db));
}
+ do_sleep();
+ }
+ if (sqlite3_step(prepared) != SQLITE_ROW) {
+ isc_throw(Sqlite3Error,
+ "Unable to query version: " << sqlite3_errmsg(db));
+ }
+ int version = sqlite3_column_int(prepared, 0);
+ sqlite3_finalize(prepared);
+ return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+ // try to get an exclusive lock. Once that is obtained, do the version
+ // check *again*, just in case this process was racing another
+ //
+ // try for 5 secs (50*0.1)
+ int rc;
+ logger.info(DATASRC_SQLITE_SETUP);
+ for (size_t i = 0; i < 50; ++i) {
+ rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+ NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ do_sleep();
+ }
+ int schema_version = check_schema_version(db);
+ if (schema_version == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
isc_throw(Sqlite3Error,
- "Failed to set up schema " << SCHEMA_LIST[i]);
+ "Failed to set up schema " << SCHEMA_LIST[i]);
}
}
+ sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+ return (SQLITE_SCHEMA_VERSION);
+ } else {
+ return (schema_version);
+ }
+}
+
+void
+checkAndSetupSchema(Sqlite3Initializer* initializer) {
+ sqlite3* const db = initializer->params_.db_;
+
+ int schema_version = check_schema_version(db);
+ if (schema_version != SQLITE_SCHEMA_VERSION) {
+ schema_version = create_database(db);
}
+ initializer->params_.version_ = schema_version;
initializer->params_.q_zone_ = prepare(db, q_zone_str);
initializer->params_.q_record_ = prepare(db, q_record_str);
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index dee14b9..fd43e1c 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -70,6 +70,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
authors = RRsetPtr(new RRset(authors_name, RRClass::CH(),
RRType::TXT(), RRTTL(0)));
authors->addRdata(generic::TXT("Chen Zhengzhang")); // Jerry
+ authors->addRdata(generic::TXT("Dmitriy Volodin"));
authors->addRdata(generic::TXT("Evan Hunt"));
authors->addRdata(generic::TXT("Haidong Wang")); // Ocean
authors->addRdata(generic::TXT("Han Feng"));
@@ -161,7 +162,7 @@ StaticDataSrc::findRRset(const Name& qname,
arg(qtype);
flags = 0;
if (qclass != getClass() && qclass != RRClass::ANY()) {
- LOG_ERROR(logger, DATASRC_STATIC_BAD_CLASS);
+ LOG_ERROR(logger, DATASRC_STATIC_CLASS_NOT_CH);
return (ERROR);
}
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index fbcf9c9..3183b1d 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -1,8 +1,12 @@
+SUBDIRS = . testdata
+
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/lib/dns
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CPPFLAGS += $(SQLITE_CFLAGS)
-AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_builddir)/testdata\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
AM_CXXFLAGS = $(B10_CXXFLAGS)
@@ -25,9 +29,23 @@ run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
-run_unittests_SOURCES += zonetable_unittest.cc
-run_unittests_SOURCES += memory_datasrc_unittest.cc
+#run_unittests_SOURCES += zonetable_unittest.cc
+#run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += database_unittest.cc
+run_unittests_SOURCES += client_unittest.cc
+run_unittests_SOURCES += sqlite3_accessor_unittest.cc
+if !USE_STATIC_LINK
+# This test uses dynamically loadable module. It will cause various
+# troubles with static link such as "missing" symbols in the static object
# for the module. As a workaround we disable this particular test
+# in this case.
+run_unittests_SOURCES += factory_unittest.cc
+endif
+# for the dlopened types we have tests for, we also need to include the
+# sources
+run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+#run_unittests_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
@@ -36,6 +54,7 @@ run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(SQLITE_LIBS)
run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
@@ -57,3 +76,4 @@ EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/rwtest.sqlite3
diff --git a/src/lib/datasrc/tests/cache_unittest.cc b/src/lib/datasrc/tests/cache_unittest.cc
index 96beae0..1325f64 100644
--- a/src/lib/datasrc/tests/cache_unittest.cc
+++ b/src/lib/datasrc/tests/cache_unittest.cc
@@ -202,15 +202,15 @@ TEST_F(CacheTest, retrieveFail) {
}
TEST_F(CacheTest, expire) {
- // Insert "foo" with a duration of 2 seconds; sleep 3. The
+ // Insert "foo" with a duration of 1 seconds; sleep 2. The
// record should not be returned from the cache even though it's
// at the top of the cache.
RRsetPtr aaaa(new RRset(Name("foo"), RRClass::IN(), RRType::AAAA(),
RRTTL(0)));
aaaa->addRdata(in::AAAA("2001:db8:3:bb::5"));
- cache.addPositive(aaaa, 0, 2);
+ cache.addPositive(aaaa, 0, 1);
- sleep(3);
+ sleep(2);
RRsetPtr r;
uint32_t f;
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
new file mode 100644
index 0000000..5b2c91a
--- /dev/null
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * The DataSourceClient can't be created as it has pure virtual methods.
+ * So we implement them as NOPs and test the other methods.
+ */
+class NopClient : public DataSourceClient {
+public:
+ virtual FindResult findZone(const isc::dns::Name&) const {
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+ }
+ virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+ return (ZoneUpdaterPtr());
+ }
+};
+
+class ClientTest : public ::testing::Test {
+public:
+ NopClient client_;
+};
+
+// The default implementation is NotImplemented
+TEST_F(ClientTest, defaultIterator) {
+ EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented);
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..fe57185
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,2410 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/foreach.hpp>
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+#include <datasrc/zone.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+
+using namespace isc::datasrc;
+using namespace std;
+using namespace boost;
+using namespace isc::dns;
+
+namespace {
+
+// Imaginary zone IDs used in the mock accessor below.
+const int READONLY_ZONE_ID = 42;
+const int WRITABLE_ZONE_ID = 4200;
+
+// Commonly used test data
+const char* const TEST_RECORDS[][5] = {
+ // some plain data
+ {"www.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"www.example.org.", "NSEC", "3600", "", "www2.example.org. A AAAA NSEC RRSIG"},
+ {"www.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"www2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"www2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"www2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ {"cname.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ // some DNSSEC-'signed' data
+ {"signed1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+ {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed1.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"signedcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"signedcname1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // special case might fail; sig is for cname, which isn't there (should be ignored)
+ // (ignoring of 'normal' other type is done above by www.)
+ {"acnamesig1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // let's pretend we have a database that is not careful
+ // about the order in which it returns data
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"signed2.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+
+ {"signedcname2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"signedcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"acnamesig2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"acnamesig3.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"ttldiff1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"ttldiff1.example.org.", "A", "360", "", "192.0.2.2"},
+
+ {"ttldiff2.example.org.", "A", "360", "", "192.0.2.1"},
+ {"ttldiff2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+ // also add some intentionally bad data
+ {"badcname1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+ {"badcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"badcname3.example.org.", "CNAME", "3600", "", "www.example2.org."},
+
+ {"badrdata.example.org.", "A", "3600", "", "bad"},
+
+ {"badtype.example.org.", "BAD_TYPE", "3600", "", "192.0.2.1"},
+
+ {"badttl.example.org.", "A", "badttl", "", "192.0.2.1"},
+
+ {"badsig.example.org.", "A", "badttl", "", "192.0.2.1"},
+ {"badsig.example.org.", "RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ {"badsigtype.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"badsigtype.example.org.", "RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // Data for testing delegation (with NS and DNAME)
+ {"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"delegation.example.org.", "NS", "3600", "",
+ "ns.delegation.example.org."},
+ {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
+ {"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ {"dname.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"dname.example.org.", "DNAME", "3600", "", "dname.example.com."},
+ {"dname.example.org.", "RRSIG", "3600", "",
+ "DNAME 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.org. FAKEFAKEFAKE"},
+
+ {"below.dname.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Broken NS
+ {"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."},
+
+ {"brokenns2.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"brokenns2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Now double DNAME, to test failure mode
+ {"baddname.example.org.", "DNAME", "3600", "", "dname1.example.com."},
+ {"baddname.example.org.", "DNAME", "3600", "", "dname2.example.com."},
+
+ // Put some data into apex (including NS) so we can check our NS
+ // doesn't break anything
+ {"example.org.", "NS", "3600", "", "ns.example.com."},
+ {"example.org.", "A", "3600", "", "192.0.2.1"},
+ {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ {"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+ // This is because of empty domain test
+ {"a.b.example.org.", "A", "3600", "", "192.0.2.1"},
+
+ // Something for wildcards
+ {"*.wild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"*.wild.example.org.", "RRSIG", "3600", "A", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"*.wild.example.org.", "NSEC", "3600", "", "cancel.here.wild.example.org. A NSEC RRSIG"},
+ {"*.wild.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"cancel.here.wild.example.org.", "AAAA", "3600", "", "2001:db8::5"},
+ {"delegatedwild.example.org.", "NS", "3600", "", "ns.example.com."},
+ {"*.delegatedwild.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"wild.*.foo.*.bar.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"bao.example.org.", "NSEC", "3600", "", "wild.*.foo.*.bar.example.org. NSEC"},
+ {"*.cnamewild.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"*.nswild.example.org.", "NS", "3600", "", "ns.example.com."},
+ // For NSEC empty non-terminal
+ {"l.example.org.", "NSEC", "3600", "", "empty.nonterminal.example.org. NSEC"},
+ {"empty.nonterminal.example.org.", "A", "3600", "", "192.0.2.1"},
+ // Invalid rdata
+ {"invalidrdata.example.org.", "A", "3600", "", "Bunch of nonsense"},
+ {"invalidrdata2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"invalidrdata2.example.org.", "RRSIG", "3600", "", "Nonsense"},
+
+ {NULL, NULL, NULL, NULL, NULL},
+};
+
+/*
+ * An accessor with minimum implementation, keeping the original
+ * "NotImplemented" methods.
+ */
+class NopAccessor : public DatabaseAccessor {
+public:
+ NopAccessor() : database_name_("mock_database")
+ { }
+
+ virtual std::pair<bool, int> getZone(const std::string& name) const {
+ if (name == "example.org.") {
+ return (std::pair<bool, int>(true, READONLY_ZONE_ID));
+ } else if (name == "null.example.org.") {
+ return (std::pair<bool, int>(true, 13));
+ } else if (name == "empty.example.org.") {
+ return (std::pair<bool, int>(true, 0));
+ } else if (name == "bad.example.org.") {
+ return (std::pair<bool, int>(true, -1));
+ } else {
+ return (std::pair<bool, int>(false, 0));
+ }
+ }
+
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ return (shared_ptr<DatabaseAccessor>()); // bogus data, but unused
+ }
+
+ virtual std::pair<bool, int> startUpdateZone(const std::string&, bool) {
+ // return dummy value. unused anyway.
+ return (pair<bool, int>(true, 0));
+ }
+ virtual void commitUpdateZone() {}
+ virtual void rollbackUpdateZone() {}
+ virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
+ virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+
+ virtual const std::string& getDBName() const {
+ return (database_name_);
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string&, int, bool)
+ const
+ {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual IteratorContextPtr getAllRecords(int) const {
+ isc_throw(isc::NotImplemented,
+ "This database datasource can't be iterated");
+ }
+
+ virtual std::string findPreviousName(int, const std::string&) const {
+ isc_throw(isc::NotImplemented,
+ "This data source doesn't support DNSSEC");
+ }
+private:
+ const std::string database_name_;
+
+};
+
+/*
+ * A virtual database accessor that pretends it contains single zone --
+ * example.org.
+ *
+ * It has the same getZone method as NopAccessor, but it provides
+ * implementation of the optional functionality.
+ */
+class MockAccessor : public NopAccessor {
+ // Type of mock database "row"s
+ typedef std::map<std::string, std::vector< std::vector<std::string> > >
+ Domains;
+
+public:
+ MockAccessor() : rollbacked_(false) {
+ readonly_records_ = &readonly_records_master_;
+ update_records_ = &update_records_master_;
+ empty_records_ = &empty_records_master_;
+ fillData();
+ }
+
+ virtual shared_ptr<DatabaseAccessor> clone() {
+ shared_ptr<MockAccessor> cloned_accessor(new MockAccessor());
+ cloned_accessor->readonly_records_ = &readonly_records_master_;
+ cloned_accessor->update_records_ = &update_records_master_;
+ cloned_accessor->empty_records_ = &empty_records_master_;
+ latest_clone_ = cloned_accessor;
+ return (cloned_accessor);
+ }
+
+private:
+ class MockNameIteratorContext : public IteratorContext {
+ public:
+ MockNameIteratorContext(const MockAccessor& mock_accessor, int zone_id,
+ const std::string& name, bool subdomains) :
+ searched_name_(name), cur_record_(0)
+ {
+ // 'hardcoded' names to trigger exceptions
+ // On these names some exceptions are thrown, to test the robustness
+ // of the find() method.
+ if (searched_name_ == "dsexception.in.search.") {
+ isc_throw(DataSourceError, "datasource exception on search");
+ } else if (searched_name_ == "iscexception.in.search.") {
+ isc_throw(isc::Exception, "isc exception on search");
+ } else if (searched_name_ == "basicexception.in.search.") {
+ throw std::exception();
+ }
+
+ cur_record_ = 0;
+ const Domains& cur_records = mock_accessor.getMockRecords(zone_id);
+ if (cur_records.count(name) > 0) {
+ // we're not aiming for efficiency in this test, simply
+ // copy the relevant vector from records
+ cur_name = cur_records.find(name)->second;
+ } else if (subdomains) {
+ cur_name.clear();
+ // Just walk everything and check if it is a subdomain.
+ // If it is, just copy all data from there.
+ for (Domains::const_iterator i(cur_records.begin());
+ i != cur_records.end(); ++i) {
+ const Name local(i->first);
+ if (local.compare(Name(name)).getRelation() ==
+ isc::dns::NameComparisonResult::SUBDOMAIN) {
+ cur_name.insert(cur_name.end(), i->second.begin(),
+ i->second.end());
+ }
+ }
+ } else {
+ cur_name.clear();
+ }
+ }
+
+ virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
+ if (searched_name_ == "dsexception.in.getnext.") {
+ isc_throw(DataSourceError, "datasource exception on getnextrecord");
+ } else if (searched_name_ == "iscexception.in.getnext.") {
+ isc_throw(isc::Exception, "isc exception on getnextrecord");
+ } else if (searched_name_ == "basicexception.in.getnext.") {
+ throw std::exception();
+ }
+
+ if (cur_record_ < cur_name.size()) {
+ for (size_t i = 0; i < COLUMN_COUNT; ++i) {
+ columns[i] = cur_name[cur_record_][i];
+ }
+ cur_record_++;
+ return (true);
+ } else {
+ return (false);
+ }
+ }
+
+ private:
+ const std::string searched_name_;
+ int cur_record_;
+ std::vector< std::vector<std::string> > cur_name;
+ };
+
+ class MockIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ MockIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 2:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ case 3:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
+ return (true);
+ case 4:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 5:
+ return (false);
+ }
+ }
+ };
+ class EmptyIteratorContext : public IteratorContext {
+ public:
+ virtual bool getNext(string(&)[COLUMN_COUNT]) {
+ return (false);
+ }
+ };
+ class BadIteratorContext : public IteratorContext {
+ private:
+ int step;
+ public:
+ BadIteratorContext() :
+ step(0)
+ { }
+ virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+ switch (step ++) {
+ case 0:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "300";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+ return (true);
+ case 1:
+ data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+ data[DatabaseAccessor::TYPE_COLUMN] = "A";
+ data[DatabaseAccessor::TTL_COLUMN] = "301";
+ data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+ return (true);
+ default:
+ ADD_FAILURE() <<
+ "Request past the end of iterator context";
+ case 2:
+ return (false);
+ }
+ }
+ };
+public:
+ virtual IteratorContextPtr getAllRecords(int id) const {
+ if (id == READONLY_ZONE_ID) {
+ return (IteratorContextPtr(new MockIteratorContext()));
+ } else if (id == 13) {
+ return (IteratorContextPtr());
+ } else if (id == 0) {
+ return (IteratorContextPtr(new EmptyIteratorContext()));
+ } else if (id == -1) {
+ return (IteratorContextPtr(new BadIteratorContext()));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual IteratorContextPtr getRecords(const std::string& name, int id,
+ bool subdomains) const
+ {
+ if (id == READONLY_ZONE_ID || id == WRITABLE_ZONE_ID) {
+ return (IteratorContextPtr(
+ new MockNameIteratorContext(*this, id, name,
+ subdomains)));
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+ virtual pair<bool, int> startUpdateZone(const std::string& zone_name,
+ bool replace)
+ {
+ const pair<bool, int> zone_info = getZone(zone_name);
+ if (!zone_info.first) {
+ return (pair<bool, int>(false, 0));
+ }
+
+ // Prepare the record set for update. If replacing the existing one,
+ // we use an empty set; otherwise we use a writable copy of the
+ // original.
+ if (replace) {
+ update_records_->clear();
+ } else {
+ *update_records_ = *readonly_records_;
+ }
+
+ return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+ }
+ virtual void commitUpdateZone() {
+ *readonly_records_ = *update_records_;
+ }
+ virtual void rollbackUpdateZone() {
+ // Special hook: if something with a name of "throw.example.org"
+ // has been added, trigger an imaginary unexpected event with an
+ // exception.
+ if (update_records_->count("throw.example.org.") > 0) {
+ isc_throw(DataSourceError, "unexpected failure in rollback");
+ }
+
+ rollbacked_ = true;
+ }
+ virtual void addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+ // Copy the current value to cur_name. If it doesn't exist,
+ // operator[] will create a new one.
+ cur_name_ = (*update_records_)[columns[DatabaseAccessor::ADD_NAME]];
+
+ vector<string> record_columns;
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_TTL]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_SIGTYPE]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_RDATA]);
+ record_columns.push_back(columns[DatabaseAccessor::ADD_NAME]);
+
+ // copy back the added entry
+ cur_name_.push_back(record_columns);
+ (*update_records_)[columns[DatabaseAccessor::ADD_NAME]] = cur_name_;
+
+ // remember this one so that test cases can check it.
+ copy(columns, columns + DatabaseAccessor::ADD_COLUMN_COUNT,
+ columns_lastadded_);
+ }
+
+ // Helper predicate class used in deleteRecordInZone().
+ struct deleteMatch {
+ deleteMatch(const string& type, const string& rdata) :
+ type_(type), rdata_(rdata)
+ {}
+ bool operator()(const vector<string>& row) const {
+ return (row[0] == type_ && row[3] == rdata_);
+ }
+ const string& type_;
+ const string& rdata_;
+ };
+
+ virtual void deleteRecordInZone(const string (¶ms)[DEL_PARAM_COUNT]) {
+ vector<vector<string> >& records =
+ (*update_records_)[params[DatabaseAccessor::DEL_NAME]];
+ records.erase(remove_if(records.begin(), records.end(),
+ deleteMatch(
+ params[DatabaseAccessor::DEL_TYPE],
+ params[DatabaseAccessor::DEL_RDATA])),
+ records.end());
+ if (records.empty()) {
+ (*update_records_).erase(params[DatabaseAccessor::DEL_NAME]);
+ }
+ }
+
+ //
+ // Helper methods to keep track of some update related activities
+ //
+ bool isRollbacked() const {
+ return (rollbacked_);
+ }
+
+ const string* getLastAdded() const {
+ return (columns_lastadded_);
+ }
+
+ // This allows the test code to get the accessor used in an update context
+ shared_ptr<const MockAccessor> getLatestClone() const {
+ return (latest_clone_);
+ }
+
+ virtual std::string findPreviousName(int id, const std::string& rname)
+ const
+ {
+ // Hardcoded for now, but we could compute it from the data
+ // Maybe do it when it is needed some time in future?
+ if (id == -1) {
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == 42) {
+ if (rname == "org.example.nonterminal.") {
+ return ("l.example.org.");
+ } else if (rname == "org.example.aa.") {
+ return ("example.org.");
+ } else if (rname == "org.example.www2." ||
+ rname == "org.example.www1.") {
+ return ("www.example.org.");
+ } else if (rname == "org.example.badnsec2.") {
+ return ("badnsec1.example.org.");
+ } else if (rname == "org.example.brokenname.") {
+ return ("brokenname...example.org.");
+ } else if (rname == "org.example.bar.*.") {
+ return ("bao.example.org.");
+ } else if (rname == "org.example.notimplnsec." ||
+ rname == "org.example.wild.here.") {
+ isc_throw(isc::NotImplemented, "Not implemented in this test");
+ } else {
+ isc_throw(isc::Unexpected, "Unexpected name");
+ }
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
+private:
+ // The following member variables are storage and/or update work space
+ // of the test zone. The "master"s are the real objects that contain
+ // the data, and they are shared among all accessors cloned from
+ // an initially created one. The pointer members allow the sharing.
+ // "readonly" is for normal lookups. "update" is the workspace for
+ // updates. When update starts it will be initialized either as an
+ // empty set (when replacing the entire zone) or as a copy of the
+ // "readonly" one. "empty" is a sentinel to produce negative results.
+ Domains readonly_records_master_;
+ Domains* readonly_records_;
+ Domains update_records_master_;
+ Domains* update_records_;
+ const Domains empty_records_master_;
+ const Domains* empty_records_;
+
+ // used as temporary storage during the building of the fake data
+
+ // used as temporary storage after searchForRecord() and during
+ // getNextRecord() calls, as well as during the building of the
+ // fake data
+ std::vector< std::vector<std::string> > cur_name_;
+
+ // The columns that were most recently added via addRecordToZone()
+ string columns_lastadded_[ADD_COLUMN_COUNT];
+
+ // Whether rollback operation has been performed for the database.
+ // Not useful except for purely testing purpose.
+ bool rollbacked_;
+
+ // Remember the mock accessor that was last cloned
+ boost::shared_ptr<MockAccessor> latest_clone_;
+
+ const Domains& getMockRecords(int zone_id) const {
+ if (zone_id == READONLY_ZONE_ID) {
+ return (*readonly_records_);
+ } else if (zone_id == WRITABLE_ZONE_ID) {
+ return (*update_records_);
+ }
+ return (*empty_records_);
+ }
+
+ // Adds one record to the current name in the database
+ // The actual data will not be added to 'records' until
+ // addCurName() is called
+ void addRecord(const std::string& type,
+ const std::string& ttl,
+ const std::string& sigtype,
+ const std::string& rdata) {
+ std::vector<std::string> columns;
+ columns.push_back(type);
+ columns.push_back(ttl);
+ columns.push_back(sigtype);
+ columns.push_back(rdata);
+ cur_name_.push_back(columns);
+ }
+
+ // Adds all records we just built with calls to addRecords
+ // to the actual fake database. This will clear cur_name_,
+ // so we can immediately start adding new records.
+ void addCurName(const std::string& name) {
+ ASSERT_EQ(0, readonly_records_->count(name));
+ // Append the name to all of them
+ for (std::vector<std::vector<std::string> >::iterator
+ i(cur_name_.begin()); i != cur_name_.end(); ++ i) {
+ i->push_back(name);
+ }
+ (*readonly_records_)[name] = cur_name_;
+ cur_name_.clear();
+ }
+
+ // Fills the database with zone data.
+ // This method constructs a number of resource records (with addRecord),
+ // which will all be added for one domain name to the fake database
+ // (with addCurName). So for instance the first set of calls create
+ // data for the name 'www.example.org', which will consist of one A RRset
+ // of one record, and one AAAA RRset of two records.
+ // The order in which they are added is the order in which getNextRecord()
+ // will return them (so we can test whether find() etc. support data that
+ // might not come in 'normal' order)
+ // It shall immediately fail if you try to add the same name twice.
+ void fillData() {
+ const char* prev_name = NULL;
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ if (prev_name != NULL &&
+ strcmp(prev_name, TEST_RECORDS[i][0]) != 0) {
+ addCurName(prev_name);
+ }
+ prev_name = TEST_RECORDS[i][0];
+ addRecord(TEST_RECORDS[i][1], TEST_RECORDS[i][2],
+ TEST_RECORDS[i][3], TEST_RECORDS[i][4]);
+ }
+ addCurName(prev_name);
+ }
+};
+
+// This tests the default getRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getRecords) {
+ EXPECT_THROW(NopAccessor().getRecords(".", 1, false),
+ isc::NotImplemented);
+}
+
+// This tests the default getAllRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getAllRecords) {
+ // The parameters don't matter
+ EXPECT_THROW(NopAccessor().getAllRecords(1),
+ isc::NotImplemented);
+}
+
+// This test fixture is templated so that we can share (most of) the test
+// cases with different types of data sources. Note that in test cases
+// we need to use 'this' to refer to member variables of the test class.
+template <typename ACCESSOR_TYPE>
+class DatabaseClientTest : public ::testing::Test {
+public:
+ DatabaseClientTest() : zname_("example.org"), qname_("www.example.org"),
+ qclass_(RRClass::IN()), qtype_(RRType::A()),
+ rrttl_(3600)
+ {
+ createClient();
+
+ // set up the commonly used finder.
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ assert(zone.code == result::SUCCESS);
+ finder_ = dynamic_pointer_cast<DatabaseClient::Finder>(
+ zone.zone_finder);
+
+ // Test IN/A RDATA to be added in update tests. Intentionally using
+ // different data than the initial data configured in the MockAccessor.
+ rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
+ rrset_->addRdata(rdata::createRdata(rrset_->getType(),
+ rrset_->getClass(), "192.0.2.2"));
+
+ // And its RRSIG. Also different from the configured one.
+ rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
+ rrttl_));
+ rrsigset_->addRdata(rdata::createRdata(rrsigset_->getType(),
+ rrsigset_->getClass(),
+ "A 5 3 0 20000101000000 "
+ "20000201000000 0 example.org. "
+ "FAKEFAKEFAKE"));
+ }
+
+ /*
+ * We initialize the client from a function, so we can call it multiple
+ * times per test.
+ */
+ void createClient() {
+ current_accessor_ = new ACCESSOR_TYPE();
+ is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
+ client_.reset(new DatabaseClient(qclass_,
+ shared_ptr<ACCESSOR_TYPE>(
+ current_accessor_)));
+ }
+
+ /**
+ * Check the zone finder is a valid one and references the zone ID and
+ * database available here.
+ */
+ void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+ ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+ "Wrong type of finder";
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(current_accessor_, &finder->getAccessor());
+ }
+
+ shared_ptr<DatabaseClient::Finder> getFinder() {
+ DataSourceClient::FindResult zone(client_->findZone(zname_));
+ EXPECT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+
+ return (finder);
+ }
+
+ // Helper methods for update tests
+ bool isRollbacked(bool expected = false) const {
+ if (is_mock_) {
+ const MockAccessor& mock_accessor =
+ dynamic_cast<const MockAccessor&>(*update_accessor_);
+ return (mock_accessor.isRollbacked());
+ } else {
+ return (expected);
+ }
+ }
+
+ void checkLastAdded(const char* const expected[]) const {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ for (int i = 0; i < DatabaseAccessor::ADD_COLUMN_COUNT; ++i) {
+ EXPECT_EQ(expected[i],
+ mock_accessor->getLatestClone()->getLastAdded()[i]);
+ }
+ }
+ }
+
+ void setUpdateAccessor() {
+ if (is_mock_) {
+ const MockAccessor* mock_accessor =
+ dynamic_cast<const MockAccessor*>(current_accessor_);
+ update_accessor_ = mock_accessor->getLatestClone();
+ }
+ }
+
+ // Some tests only work for MockAccessor. We remember whether our accessor
+ // is of that type.
+ bool is_mock_;
+
+ // Will be deleted by client_, just keep the current value for comparison.
+ ACCESSOR_TYPE* current_accessor_;
+ shared_ptr<DatabaseClient> client_;
+ const std::string database_name_;
+
+ // The zone finder of the test zone commonly used in various tests.
+ shared_ptr<DatabaseClient::Finder> finder_;
+
+ // Some shortcut variables for commonly used test parameters
+ const Name zname_; // the zone name stored in the test data source
+ const Name qname_; // commonly used name to be found
+ const RRClass qclass_; // commonly used RR class used with qname
+ const RRType qtype_; // commonly used RR type used with qname
+ const RRTTL rrttl_; // commonly used RR TTL
+ RRsetPtr rrset_; // for adding/deleting an RRset
+ RRsetPtr rrsigset_; // for adding/deleting an RRset (presumably its RRSIG,
+ // judging by the name -- confirm against users)
+
+ // update related objects to be tested
+ ZoneUpdaterPtr updater_;
+ // The accessor clone in use by updater_; set via setUpdateAccessor().
+ shared_ptr<const DatabaseAccessor> update_accessor_;
+
+ // placeholders
+ const std::vector<std::string> empty_rdatas_; // for NXRRSET/NXDOMAIN
+ std::vector<std::string> expected_rdatas_;
+ std::vector<std::string> expected_sig_rdatas_;
+};
+
+// SQLite3 accessor pre-loaded with the shared test data: at construction
+// it opens a writable copy of the test database and commits every entry
+// of TEST_RECORDS into the example.org zone, so TYPED_TESTs see the same
+// content as with the mock accessor.
+class TestSQLite3Accessor : public SQLite3Accessor {
+public:
+ TestSQLite3Accessor() : SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied",
+ RRClass::IN())
+ {
+ // second argument 'true' presumably replaces existing zone
+ // content -- confirm against the accessor API
+ startUpdateZone("example.org.", true);
+ string columns[ADD_COLUMN_COUNT];
+ for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+ columns[ADD_NAME] = TEST_RECORDS[i][0];
+ columns[ADD_REV_NAME] = Name(columns[ADD_NAME]).reverse().toText();
+ columns[ADD_TYPE] = TEST_RECORDS[i][1];
+ columns[ADD_TTL] = TEST_RECORDS[i][2];
+ columns[ADD_SIGTYPE] = TEST_RECORDS[i][3];
+ columns[ADD_RDATA] = TEST_RECORDS[i][4];
+
+ addRecordToZone(columns);
+ }
+ commitUpdateZone();
+ }
+};
+
+// The following two lines instantiate test cases with concrete accessor
+// classes to be tested.
+// XXX: clang++ installed on our FreeBSD buildbot cannot complete compiling
+// this file, seemingly due to the size of the code. We'll consider more
+// complete workaround, but for a short term workaround we'll reduce the
+// number of tested accessor classes (thus reducing the amount of code
+// to be compiled) for this particular environment.
+#if defined(__clang__) && defined(__FreeBSD__)
+typedef ::testing::Types<MockAccessor> TestAccessorTypes;
+#else
+typedef ::testing::Types<MockAccessor, TestSQLite3Accessor> TestAccessorTypes;
+#endif
+
+// Register the typed test suite: every TYPED_TEST below runs once per
+// accessor type listed above.
+TYPED_TEST_CASE(DatabaseClientTest, TestAccessorTypes);
+
+// In some cases the entire test fixture is for the mock accessor only.
+// We use the usual TEST_F for them with the corresponding specialized class
+// to make the code simpler.
+typedef DatabaseClientTest<MockAccessor> MockDatabaseClientTest;
+
+// Looking up a zone that the data source does not contain must report
+// NOTFOUND.
+TYPED_TEST(DatabaseClientTest, zoneNotFound) {
+ const DataSourceClient::FindResult found(
+ this->client_->findZone(Name("example.com")));
+ EXPECT_EQ(result::NOTFOUND, found.code);
+}
+
+// An exact match on the apex name must yield SUCCESS and a usable finder.
+TYPED_TEST(DatabaseClientTest, exactZone) {
+ const DataSourceClient::FindResult found(
+ this->client_->findZone(Name("example.org")));
+ EXPECT_EQ(result::SUCCESS, found.code);
+ this->checkZoneFinder(found);
+}
+
+// A name below the apex must produce PARTIALMATCH, still with a valid
+// finder for the enclosing zone.
+TYPED_TEST(DatabaseClientTest, superZone) {
+ const DataSourceClient::FindResult found(
+ this->client_->findZone(Name("sub.example.org")));
+ EXPECT_EQ(result::PARTIALMATCH, found.code);
+ this->checkZoneFinder(found);
+}
+
+// This test doesn't depend on derived accessor class, so we use TEST().
+// Constructing a client with a null accessor must throw InvalidParameter.
+TEST(GenericDatabaseClientTest, noAccessorException) {
+ // We need a dummy variable here; some compiler would regard it a mere
+ // declaration instead of an instantiation and make the test fail.
+ EXPECT_THROW(DatabaseClient dummy(RRClass::IN(),
+ shared_ptr<DatabaseAccessor>()),
+ isc::InvalidParameter);
+}
+
+// If the zone doesn't exist, exception is thrown
+// (getIterator() reports the missing zone as DataSourceError rather than
+// returning a null iterator).
+TYPED_TEST(DatabaseClientTest, noZoneIterator) {
+ EXPECT_THROW(this->client_->getIterator(Name("example.com")),
+ DataSourceError);
+}
+
+// If the zone doesn't exist and iteration is not implemented, it still
+// throws DataSourceError for the missing zone rather than NotImplemented.
+TEST(GenericDatabaseClientTest, noZoneNotImplementedIterator) {
+ // Use unqualified shared_ptr for consistency with the sibling
+ // notImplementedIterator test below.
+ EXPECT_THROW(DatabaseClient(RRClass::IN(),
+ shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(
+ Name("example.com")),
+ DataSourceError);
+}
+
+// For a zone the NopAccessor does report (example.org, presumably -- the
+// previous test shows example.com is NOTFOUND), requesting iteration must
+// fail with NotImplemented.
+TEST(GenericDatabaseClientTest, notImplementedIterator) {
+ EXPECT_THROW(DatabaseClient(RRClass::IN(), shared_ptr<DatabaseAccessor>(
+ new NopAccessor())).getIterator(Name("example.org")),
+ isc::NotImplemented);
+}
+
+// Pretend a bug in the connection and pass NULL as the context
+// Should not crash, but gracefully throw. Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, nullIteratorContext) {
+ // "null.example.org" is a mock-special zone name that yields a NULL
+ // iterator context; the client must detect it and throw Unexpected.
+ EXPECT_THROW(this->client_->getIterator(Name("null.example.org")),
+ isc::Unexpected);
+}
+
+// It doesn't crash or anything if the zone is completely empty.
+// Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, emptyIterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("empty.example.org")));
+ // The very first getNextRRset() already signals exhaustion (null RRset).
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ // This is past the end, it should throw
+ EXPECT_THROW(it->getNextRRset(), isc::Unexpected);
+}
+
+// Iterate through a zone
+// Walks the whole zone via getIterator() and, for the mock accessor
+// (whose record order is fixed), checks each returned RRset in sequence:
+// the SOA, then x.example.org/A, then x.example.org/AAAA.
+TYPED_TEST(DatabaseClientTest, iterator) {
+ ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+ ConstRRsetPtr rrset(it->getNextRRset());
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+
+ // The rest of the checks work only for the mock accessor.
+ if (!this->is_mock_) {
+ return;
+ }
+
+ // First RRset: the zone's SOA with a single RDATA.
+ EXPECT_EQ(Name("example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::SOA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ RdataIteratorPtr rit(rrset->getRdataIterator());
+ ASSERT_FALSE(rit->isLast());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ // Second RRset: two A records for x.example.org.
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::A(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("192.0.2.2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+
+ // Third and last RRset: two AAAA records for x.example.org.
+ rrset = it->getNextRRset();
+ ASSERT_NE(ConstRRsetPtr(), rrset);
+ EXPECT_EQ(Name("x.example.org"), rrset->getName());
+ EXPECT_EQ(RRClass::IN(), rrset->getClass());
+ EXPECT_EQ(RRType::AAAA(), rrset->getType());
+ EXPECT_EQ(RRTTL(300), rrset->getTTL());
+ // The iterator must now be exhausted (checked before inspecting the
+ // last RRset's RDATA, which remains valid on its own).
+ EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+ rit = rrset->getRdataIterator();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::1", rit->getCurrent().toText());
+ rit->next();
+ ASSERT_FALSE(rit->isLast());
+ EXPECT_EQ("2001:db8::2", rit->getCurrent().toText());
+ rit->next();
+ EXPECT_TRUE(rit->isLast());
+}
+
+// This has inconsistent TTL in the set (the rest, like nonsense in
+// the data is handled in rdata itself). Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, badIterator) {
+ // It should not throw, but get the lowest one of them
+ ZoneIteratorPtr it(this->client_->getIterator(Name("bad.example.org")));
+ // (expected, actual) argument order, consistent with the rest of the file
+ EXPECT_EQ(isc::dns::RRTTL(300), it->getNextRRset()->getTTL());
+}
+
+// Verifies that 'rrset' carries exactly the given owner name, class,
+// type, TTL and RDATA strings.  Builds an RRset with the expected
+// contents and delegates the comparison to the shared rrsetCheck()
+// test utility.
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+ const isc::dns::Name& name,
+ const isc::dns::RRClass& rrclass,
+ const isc::dns::RRType& rrtype,
+ const isc::dns::RRTTL& rrttl,
+ const std::vector<std::string>& rdatas) {
+ isc::dns::RRsetPtr expected(
+ new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+ for (std::vector<std::string>::const_iterator it = rdatas.begin();
+ it != rdatas.end(); ++it) {
+ expected->addRdata(isc::dns::rdata::createRdata(rrtype, rrclass, *it));
+ }
+ isc::testutils::rrsetCheck(expected, rrset);
+}
+
+// Run finder.find(name, type, options) and check the outcome:
+// - result.code must equal expected_result;
+// - if expected_rdatas is non-empty, the answer RRset must match it
+//   (type expected_type, TTL expected_ttl), and its RRSIG must match
+//   expected_sig_rdatas (or be absent when that is empty);
+// - if expected_rdatas is empty, no RRset may be returned.
+// expected_name overrides the owner name the answer is checked against
+// (for delegation/DNAME cases); the root-name default means "same as
+// the queried name".
+void
+doFindTest(ZoneFinder& finder,
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+ ZoneFinder::FindResult result =
+ finder.find(name, type, NULL, options);
+ ASSERT_EQ(expected_result, result.code) << name << " " << type;
+ if (!expected_rdatas.empty() && result.rrset) {
+ // Name(".") is the sentinel for "check against the queried name".
+ checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+ name, finder.getClass(), expected_type, expected_ttl,
+ expected_rdatas);
+
+ if (!expected_sig_rdatas.empty() && result.rrset->getRRsig()) {
+ checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+ expected_name : name, finder.getClass(),
+ isc::dns::RRType::RRSIG(), expected_ttl,
+ expected_sig_rdatas);
+ } else if (expected_sig_rdatas.empty()) {
+ // No RRSIG expected: make sure none was attached.
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ } else {
+ ADD_FAILURE() << "Missing RRSIG";
+ }
+ } else if (expected_rdatas.empty()) {
+ EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ } else {
+ ADD_FAILURE() << "Missing result";
+ }
+}
+
+// Exercises basic find() behavior over the common test zone: positive
+// answers (single and multiple RDATA, A/AAAA), NXRRSET/NXDOMAIN, CNAME
+// interception, RRSIG retrieval, TTL handling for inconsistent sets,
+// and error reporting for various kinds of broken data.
+TYPED_TEST(DatabaseClientTest, find) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("www2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_,
+ ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Name exists but the asked type doesn't: NXRRSET with no RRset.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+ this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Asking for A where a CNAME lives returns the CNAME instead.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Asking for the CNAME itself is a plain SUCCESS.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, isc::dns::Name("cname.example.org."),
+ isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("doesnotexist.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Signed data: RRSIGs must come back with the answer.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname1.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // "signed2"/"signedcname2" variants: same checks on records stored
+ // in a different order in the backend.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::2");
+ this->expected_rdatas_.push_back("2001:db8::1");
+ this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("signedcname2.example.org."),
+ this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+ ZoneFinder::CNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // "acnamesig" variants: A answer with RRSIG alongside CNAME-related data.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig1.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig2.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("acnamesig3.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Records with differing TTLs in one set: the answer carries RRTTL(360)
+ // rather than the fixture default.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff1.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(*finder, isc::dns::Name("ttldiff2.example.org."),
+ this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Various forms of broken zone data must surface as DataSourceError.
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Trigger the hardcoded exceptions and see if find() has cleaned up
+ if (this->is_mock_) {
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ isc::Exception);
+ EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+ this->qtype_,
+ NULL, ZoneFinder::FIND_DEFAULT),
+ std::exception);
+ }
+
+ // This RRSIG has the wrong sigtype field, which should be
+ // an error if we decide to keep using that field
+ // Right now the field is ignored, so it does not error
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("badsigtype.example.org."),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+// Checks find() behavior around zone cuts: NS at the apex is ordinary
+// data, names at or below a delegation yield DELEGATION with the NS set,
+// names below a DNAME yield DNAME, and broken NS/DNAME data throws.
+TYPED_TEST(DatabaseClientTest, findDelegation) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // The apex should not be considered delegation point and we can access
+ // data
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Check when we ask for something below delegation point, we get the NS
+ // (Both when the RRset there exists and doesn't)
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+ doFindTest(*finder, isc::dns::Name("deep.below.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."));
+
+ // Even when we check directly at the delegation point, we should get
+ // the NS
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // And when we ask direcly for the NS, we should still get delegation
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // Now test delegation. If it is below the delegation point, we should get
+ // the DNAME (the one with data under DNAME is invalid zone, but we test
+ // the behaviour anyway just to make sure)
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+ doFindTest(*finder, isc::dns::Name("really.deep.below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+
+ // Asking direcly for DNAME should give SUCCESS
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // But we don't delegate at DNAME point
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+
+ // This is broken dname, it contains two targets
+ EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+
+ // Broken NS - it lives together with something else
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+ EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+ this->qtype_, NULL,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, emptyDomain) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // This domain doesn't exist, but a subdomain of it does.
+ // Therefore we should pretend the domain is there, but contains no RRsets
+ // (expected_rdatas_ is still empty in a fresh fixture, so doFindTest
+ // asserts that no RRset is returned).
+ doFindTest(*finder, isc::dns::Name("b.example.org."), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+// Glue-OK mode. Just go through NS delegations down (but not through
+// DNAME) and pretend it is not there.
+// With FIND_GLUE_OK, names under an NS cut are answered directly
+// (SUCCESS/NXRRSET/NXDOMAIN) instead of DELEGATION; DNAME still applies.
+TYPED_TEST(DatabaseClientTest, glueOK) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("nothere.delegation.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("nothere.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+ this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("ns.delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ this->expected_rdatas_.push_back("ns.delegation.example.org.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // When we request the NS, it should be SUCCESS, not DELEGATION
+ // (different in GLUE_OK)
+ doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+ isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ isc::dns::Name("delegation.example.org."),
+ ZoneFinder::FIND_GLUE_OK);
+ // DNAME is NOT transparent even in GLUE_OK mode.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("dname.example.com.");
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ this->qtype_, isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+ doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+ this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcard) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // First, simple wildcard match
+ // Check also that the RRSIG is added from the wildcard (not modified)
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::WILDCARD, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::WILDCARD,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Direct request for this wildcard
+ this->expected_rdatas_.push_back("192.0.2.5");
+ this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ // This is nonsense, but check it doesn't match by some stupid accident
+ doFindTest(*finder, isc::dns::Name("a.*.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // These should be canceled, since it is below a domain which exitsts
+ doFindTest(*finder, isc::dns::Name("nothing.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ doFindTest(*finder,
+ isc::dns::Name("below.cancel.here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // And this should be just plain empty non-terminal domain, check
+ // the wildcard doesn't hurt it
+ doFindTest(*finder, isc::dns::Name("here.wild.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // Also make sure that the wildcard doesn't hurt the original data
+ // below the wildcard
+ this->expected_rdatas_.push_back("2001:db8::5");
+ doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+ isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ this->expected_rdatas_.clear();
+
+ // How wildcard go together with delegation
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"));
+ // FIXME: This doesn't look logically OK, GLUE_OK should make it transparent,
+ // so the match should either work or be canceled, but return NXDOMAIN
+ doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+ this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+ ZoneFinder::DELEGATION, this->expected_rdatas_,
+ this->expected_sig_rdatas_,
+ isc::dns::Name("delegatedwild.example.org"),
+ ZoneFinder::FIND_GLUE_OK);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.5");
+ // These are direct matches
+ const char* positive_names[] = {
+ "wild.*.foo.example.org.",
+ "wild.*.foo.*.bar.example.org.",
+ NULL
+ };
+ for (const char** name(positive_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_,
+ this->expected_sig_rdatas_);
+ }
+
+ // These are wildcard matches against empty nonterminal asterisk
+ this->expected_rdatas_.clear();
+ const char* negative_names[] = {
+ "a.foo.example.org.",
+ "*.foo.example.org.",
+ "foo.example.org.",
+ "wild.bar.foo.example.org.",
+ "baz.foo.*.bar.example.org",
+ "baz.foo.baz.bar.example.org",
+ "*.foo.baz.bar.example.org",
+ "*.foo.*.bar.example.org",
+ "foo.*.bar.example.org",
+ "*.bar.example.org",
+ "bar.example.org",
+ NULL
+ };
+ for (const char** name(negative_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ // FIXME: What should be returned in this case? How does the
+ // DNSSEC logic handle it?
+ }
+
+ const char* negative_dnssec_names[] = {
+ "a.bar.example.org.",
+ "foo.baz.bar.example.org.",
+ "a.foo.bar.example.org.",
+ NULL
+ };
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("wild.*.foo.*.bar.example.org. NSEC");
+ this->expected_sig_rdatas_.clear();
+ for (const char** name(negative_dnssec_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ RRType::NSEC(), this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("bao.example.org."), ZoneFinder::FIND_DNSSEC);
+ }
+
+ // Some strange things in the wild node
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.cnamewild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("a.nswild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, NXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ //
+ // The user will have to query us again to get the correct
+ // answer (e.g. to prove there's no exact match)
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("cancel.here.wild.example.org. A NSEC "
+ "RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // Note that the NSEC name should NOT be synthesized.
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
+ // The domain doesn't exist, so we must get the right NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("www.example.org."), ZoneFinder::FIND_DNSSEC);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("acnamesig1.example.org. NS A NSEC RRSIG");
+ // This tests it works correctly in apex (there was a bug, where a check
+ // for NS-alone was there and it would throw).
+ doFindTest(*finder, isc::dns::Name("aa.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("notimplnsec.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, Name::ROOT_NAME(),
+ ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
+ // Same as NXDOMAIN_NSEC, but with empty non-terminal
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+ doFindTest(*finder, isc::dns::Name("nonterminal.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We don't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("here.wild.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, getOrigin) {
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+ if (this->is_mock_) {
+ EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+ }
+ EXPECT_EQ(this->zname_, finder->getOrigin());
+}
+
+TYPED_TEST(DatabaseClientTest, updaterFinder) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ ASSERT_TRUE(this->updater_);
+
+ // If this update isn't replacing the zone, the finder should work
+ // just like the normal find() case.
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_,
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // When replacing the zone, the updater's finder shouldn't see anything
+ // in the zone until something is added.
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ ASSERT_TRUE(this->updater_);
+ if (this->is_mock_) {
+ DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+ this->updater_->getFinder());
+ EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+ }
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, flushZone) {
+ // A simple update case: flush the entire zone
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ // Before update, the name exists.
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // start update in the replace mode. the normal finder should still
+ // be able to see the record, but the updater's finder shouldn't.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::SUCCESS,
+ finder->find(this->qname_, this->qtype_).code);
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+
+ // commit the update. now the normal finder shouldn't see it.
+ this->updater_->commit();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->find(this->qname_,
+ this->qtype_).code);
+
+ // Check rollback wasn't accidentally performed.
+ EXPECT_FALSE(this->isRollbacked());
+}
+
+TYPED_TEST(DatabaseClientTest, updateCancel) {
+ // similar to the previous test, but destruct the updater before commit.
+
+ ZoneFinderPtr finder = this->client_->findZone(this->zname_).zone_finder;
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->setUpdateAccessor();
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
+ this->updater_->getFinder().find(this->qname_,
+ this->qtype_).code);
+ // DB should not have been rolled back yet.
+ EXPECT_FALSE(this->isRollbacked());
+ this->updater_.reset(); // destruct without commit
+
+ // reset() should have triggered rollback (although it doesn't affect
+ // anything to the mock accessor implementation except for the result of
+ // isRollbacked())
+ EXPECT_TRUE(this->isRollbacked(true));
+ EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+ this->qtype_).code);
+}
+
+TYPED_TEST(DatabaseClientTest, exceptionFromRollback) {
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+
+ this->rrset_.reset(new RRset(Name("throw.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ // destruct without commit. The added name will result in an exception
+ // in the MockAccessor's rollback method. It shouldn't be propagated.
+ EXPECT_NO_THROW(this->updater_.reset());
+}
+
+TYPED_TEST(DatabaseClientTest, duplicateCommit) {
+ // duplicate commit. should result in exception.
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->commit(), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToNewZone) {
+ // Add a single RRset to a fresh empty zone
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // Similar to the previous case, but with RRSIG
+ this->updater_.reset();
+ this->updater_ = this->client_->getUpdater(this->zname_, true);
+ this->updater_->addRRset(*this->rrset_);
+ this->updater_->addRRset(*this->rrsigset_);
+
+ // confirm the expected columns were passed to the accessor (if checkable).
+ const char* const rrsig_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "RRSIG", "A",
+ "A 5 3 0 20000101000000 20000201000000 0 example.org. FAKEFAKEFAKE"
+ };
+ this->checkLastAdded(rrsig_added);
+
+ this->expected_sig_rdatas_.clear();
+ this->expected_sig_rdatas_.push_back(
+ rrsig_added[DatabaseAccessor::ADD_RDATA]);
+ {
+ SCOPED_TRACE("add RRset with RRSIG");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+ }
+
+ // Add the non RRSIG RRset again, to see the attempt of adding RRSIG
+ // causes any unexpected effect, in particular, whether the SIGTYPE
+ // field might remain.
+ this->updater_->addRRset(*this->rrset_);
+ const char* const rrset_added[] = {
+ "www.example.org.", "org.example.www.", "3600", "A", "", "192.0.2.2"
+ };
+ this->checkLastAdded(rrset_added);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToCurrentZone) {
+ // Similar to the previous test, but not replacing the existing data.
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->addRRset(*this->rrset_);
+
+ // We should see both old and new data.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("add RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addMultipleRRs) {
+ // Similar to the previous case, but the added RRset contains multiple
+ // RRs.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.3"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.3");
+ {
+ SCOPED_TRACE("add multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfLargerTTL) {
+ // Similar to the previous one, but the TTL of the added RRset is larger
+ // than that of the existing record. The finder should use the smaller
+ // one.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(7200));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of larger TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfSmallerTTL) {
+ // Similar to the previous one, but the added RRset has a smaller TTL.
+ // The added TTL should be used by the finder.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->setTTL(RRTTL(1800));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ {
+ SCOPED_TRACE("add RRset of smaller TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, RRTTL(1800), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addSameRR) {
+ // Add the same RR as that is already in the data source.
+ // Currently the add interface doesn't try to suppress the duplicate,
+ // neither does the finder. We may want to revisit it in future versions.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ {
+ SCOPED_TRACE("add same RR");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ this->updater_->addRRset(*this->rrset_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.100");
+ {
+ // Note: with the find() implementation being more strict about
+ // zone cuts, this test may fail. Then the test should be updated.
+ SCOPED_TRACE("add out-of-zone RR");
+ doFindTest(this->updater_->getFinder(), Name("example.com"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, addEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRset) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+
+ // Delete one RR from an RRset
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // Delete the only RR of a name
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+ this->updater_->deleteRRset(*this->rrset_);
+
+ // The this->updater_ finder should immediately see the deleted results.
+ {
+ SCOPED_TRACE("delete RRset");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+
+ // before committing the change, the original finder should see the
+ // original record.
+ {
+ SCOPED_TRACE("delete RRset before commit");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ RRType::CNAME(), this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+
+ // once committed, the record should be removed from the original finder's
+ // view, too.
+ this->updater_->commit();
+ {
+ SCOPED_TRACE("delete RRset after commit");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+ this->rrttl_, ZoneFinder::NXRRSET, this->empty_rdatas_,
+ this->empty_rdatas_);
+ doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
+ // similar to the previous case, but it removes the only record of the
+ // given name. a subsequent find() should result in NXDOMAIN.
+ this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+ RRType::CNAME(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "www.example.org"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset to NXDOMAIN");
+ doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+ this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteMultipleRRs) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::2"));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+
+ {
+ SCOPED_TRACE("delete multiple RRs");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, partialDelete) {
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::1"));
+ // This does not exist in the test data source:
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::3"));
+
+ // deleteRRset should succeed "silently", and subsequent find() should
+ // find the remaining RR.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("partial delete");
+ this->expected_rdatas_.push_back("2001:db8::2");
+ doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+ RRType::AAAA(), this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteNoMatch) {
+ // similar to the previous test, but there's not even a match in the
+ // specified RRset. Essentially there's no difference in the result.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete no match");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteWithDifferentTTL) {
+ // Our delete interface simply ignores TTL (may change in a future version)
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ RRTTL(1800)));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("delete RRset with a different TTL");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteDeviantRR) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // RR class mismatch. This should be detected and rejected.
+ this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "test text"));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+
+ // Out-of-zone owner name. At a higher level this should be rejected,
+ // but it doesn't happen in this interface.
+ this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.100"));
+ EXPECT_NO_THROW(this->updater_->deleteRRset(*this->rrset_));
+}
+
+TYPED_TEST(DatabaseClientTest, deleteAfterCommit) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->commit();
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteEmptyRRset) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+ this->rrttl_));
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetWithRRSIG) {
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_->addRRsig(*this->rrsigset_);
+ EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, compoundUpdate) {
+ // This test case performs arbitrarily chosen add/delete operations
+ // in a single update transaction. Essentially there is nothing new to
+ // test here, but there may be some bugs that were overlooked and can
+ // only happen in the compound update scenario, so we test it.
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+ // add a new RR to an existing RRset
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.1");
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // delete an existing RR
+ this->rrset_.reset(new RRset(Name("www.example.org"), this->qclass_,
+ this->qtype_, this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "192.0.2.1"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // re-add it
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+ this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->empty_rdatas_);
+
+ // add a new RR with a new name
+ const Name newname("newname.example.org");
+ const RRType newtype(RRType::AAAA());
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_);
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::10"));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->addRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ this->expected_rdatas_.push_back("2001:db8::11");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // delete one RR from the previous set
+ this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+ this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "2001:db8::11"));
+ this->updater_->deleteRRset(*this->rrset_);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+ this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ // Commit the changes, confirm the entire changes applied.
+ this->updater_->commit();
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("192.0.2.2");
+ this->expected_rdatas_.push_back("192.0.2.1");
+ doFindTest(*finder, this->qname_, this->qtype_, this->qtype_, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("2001:db8::10");
+ doFindTest(*finder, newname, newtype, newtype, this->rrttl_,
+ ZoneFinder::SUCCESS, this->expected_rdatas_,
+ this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, previous) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www2.example.org.")));
+ // Check a name that doesn't exist there
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www1.example.org.")));
+ if (this->is_mock_) { // We can't really force the DB to throw
+ // Check it doesn't crash or anything if the underlying DB throws
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("bad.example.org")));
+ finder =
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder);
+
+ EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")),
+ isc::NotImplemented);
+ } else {
+ // No need to test this on the mock one, because there we test only
+ // that the exception gets through
+
+ // A name before the origin
+ EXPECT_THROW(finder->findPreviousName(Name("example.com")),
+ isc::NotImplemented);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, invalidRdata) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->find(Name("invalidrdata.example.org."), RRType::A()),
+ DataSourceError);
+ EXPECT_THROW(finder->find(Name("invalidrdata2.example.org."), RRType::A()),
+ DataSourceError);
+}
+
+TEST_F(MockDatabaseClientTest, missingNSEC) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ /*
+ * FIXME: For now, we can't really distinguish this bogus input
+ * from not-signed zone so we can't throw. But once we can,
+ * enable the original test.
+ */
+#if 0
+ EXPECT_THROW(finder->find(Name("badnsec2.example.org."), RRType::A(), NULL,
+ ZoneFinder::FIND_DNSSEC),
+ DataSourceError);
+#endif
+ doFindTest(*finder, Name("badnsec2.example.org."), RRType::A(),
+ RRType::A(), this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TEST_F(MockDatabaseClientTest, badName) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")),
+ DataSourceError);
+}
+
+}
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
new file mode 100644
index 0000000..94d1118
--- /dev/null
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/scoped_ptr.hpp>
+
+#include <datasrc/factory.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <dns/rrclass.h>
+#include <cc/data.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+namespace {
+
+TEST(FactoryTest, sqlite3ClientBadConfig) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operation on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ DataSourceConfigError);
+
+ config->set("database_file", Element::create("/foo/bar/doesnotexist"));
+ ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+ SQLite3Error);
+
+ config->set("database_file", Element::create(SQLITE_DBFILE_EXAMPLE_ORG));
+ DataSourceClientContainer dsc("sqlite3", config);
+
+ DataSourceClient::FindResult result1(
+ dsc.getInstance().findZone(isc::dns::Name("example.org.")));
+ ASSERT_EQ(result::SUCCESS, result1.code);
+
+ DataSourceClient::FindResult result2(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result2.code);
+
+ ZoneIteratorPtr iterator(dsc.getInstance().getIterator(
+ isc::dns::Name("example.org.")));
+
+ ZoneUpdaterPtr updater(dsc.getInstance().getUpdater(
+ isc::dns::Name("example.org."), false));
+}
+
+TEST(FactoryTest, memoryClient) {
+ // We start out by building the configuration data bit by bit,
+ // testing each form of 'bad config', until we have a good one.
+ // Then we do some very basic operation on the client (detailed
+ // tests are left to the implementation-specific backends)
+ ElementPtr config;
+ ASSERT_THROW(DataSourceClientContainer client("memory", config),
+ DataSourceConfigError);
+
+ config = Element::create("asdf");
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config = Element::createMap();
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("type", Element::create("memory"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("FOO"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("class", Element::create("IN"));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", ElementPtr());
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", Element::create(1));
+ ASSERT_THROW(DataSourceClientContainer("memory", config),
+ DataSourceConfigError);
+
+ config->set("zones", Element::createList());
+ DataSourceClientContainer dsc("memory", config);
+
+ // Once it is able to load some zones, we should add a few tests
+ // here to see that it does.
+ DataSourceClient::FindResult result(
+ dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+ ASSERT_EQ(result::NOTFOUND, result.code);
+
+ ASSERT_THROW(dsc.getInstance().getIterator(isc::dns::Name("example.org.")),
+ DataSourceError);
+
+ ASSERT_THROW(dsc.getInstance().getUpdater(isc::dns::Name("no.such.zone."),
+ false), isc::NotImplemented);
+}
+
+TEST(FactoryTest, badType) {
+ ASSERT_THROW(DataSourceClientContainer("foo", ElementPtr()),
+ DataSourceError);
+}
+
+} // end anonymous namespace
+
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 83fbb58..2b854db 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -29,6 +29,8 @@
#include <dns/masterload.h>
#include <datasrc/memory_datasrc.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
#include <gtest/gtest.h>
@@ -42,119 +44,173 @@ namespace {
using result::SUCCESS;
using result::EXIST;
-class MemoryDataSrcTest : public ::testing::Test {
+class InMemoryClientTest : public ::testing::Test {
protected:
- MemoryDataSrcTest() : rrclass(RRClass::IN())
+ InMemoryClientTest() : rrclass(RRClass::IN())
{}
RRClass rrclass;
- MemoryDataSrc memory_datasrc;
+ InMemoryClient memory_client;
};
-TEST_F(MemoryDataSrcTest, add_find_Zone) {
+TEST_F(InMemoryClientTest, add_find_Zone) {
// test add zone
// Bogus zone (NULL)
- EXPECT_THROW(memory_datasrc.addZone(ZonePtr()), isc::InvalidParameter);
+ EXPECT_THROW(memory_client.addZone(ZoneFinderPtr()),
+ isc::InvalidParameter);
// add zones with different names one by one
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("a")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(), Name("b")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("c")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("a")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("b")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("c")))));
// add zones with the same name suffix
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("x.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("o.w.y.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("p.w.y.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("q.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("x.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("o.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("p.w.y.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("q.w.y.d.e.f")))));
// add super zone and its subzone
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(), Name("g.h")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("i.g.h")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("z.d.e.f")))));
- EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("j.z.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("g.h")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("i.g.h")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("z.d.e.f")))));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("j.z.d.e.f")))));
// different zone class isn't allowed.
- EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("q.w.y.d.e.f")))));
+ EXPECT_EQ(result::EXIST, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("q.w.y.d.e.f")))));
// names are compared in a case insensitive manner.
- EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(),
- Name("Q.W.Y.d.E.f")))));
+ EXPECT_EQ(result::EXIST, memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("Q.W.Y.d.E.f")))));
// test find zone
- EXPECT_EQ(result::SUCCESS, memory_datasrc.findZone(Name("a")).code);
+ EXPECT_EQ(result::SUCCESS, memory_client.findZone(Name("a")).code);
EXPECT_EQ(Name("a"),
- memory_datasrc.findZone(Name("a")).zone->getOrigin());
+ memory_client.findZone(Name("a")).zone_finder->getOrigin());
EXPECT_EQ(result::SUCCESS,
- memory_datasrc.findZone(Name("j.z.d.e.f")).code);
+ memory_client.findZone(Name("j.z.d.e.f")).code);
EXPECT_EQ(Name("j.z.d.e.f"),
- memory_datasrc.findZone(Name("j.z.d.e.f")).zone->getOrigin());
+ memory_client.findZone(Name("j.z.d.e.f")).zone_finder->
+ getOrigin());
// NOTFOUND
- EXPECT_EQ(result::NOTFOUND, memory_datasrc.findZone(Name("d.e.f")).code);
- EXPECT_EQ(ConstZonePtr(), memory_datasrc.findZone(Name("d.e.f")).zone);
+ EXPECT_EQ(result::NOTFOUND, memory_client.findZone(Name("d.e.f")).code);
+ EXPECT_EQ(ConstZoneFinderPtr(),
+ memory_client.findZone(Name("d.e.f")).zone_finder);
EXPECT_EQ(result::NOTFOUND,
- memory_datasrc.findZone(Name("w.y.d.e.f")).code);
- EXPECT_EQ(ConstZonePtr(),
- memory_datasrc.findZone(Name("w.y.d.e.f")).zone);
+ memory_client.findZone(Name("w.y.d.e.f")).code);
+ EXPECT_EQ(ConstZoneFinderPtr(),
+ memory_client.findZone(Name("w.y.d.e.f")).zone_finder);
// there's no exact match. the result should be the longest match,
// and the code should be PARTIALMATCH.
EXPECT_EQ(result::PARTIALMATCH,
- memory_datasrc.findZone(Name("j.g.h")).code);
+ memory_client.findZone(Name("j.g.h")).code);
EXPECT_EQ(Name("g.h"),
- memory_datasrc.findZone(Name("g.h")).zone->getOrigin());
+ memory_client.findZone(Name("g.h")).zone_finder->getOrigin());
EXPECT_EQ(result::PARTIALMATCH,
- memory_datasrc.findZone(Name("z.i.g.h")).code);
+ memory_client.findZone(Name("z.i.g.h")).code);
EXPECT_EQ(Name("i.g.h"),
- memory_datasrc.findZone(Name("z.i.g.h")).zone->getOrigin());
+ memory_client.findZone(Name("z.i.g.h")).zone_finder->
+ getOrigin());
}
-TEST_F(MemoryDataSrcTest, getZoneCount) {
- EXPECT_EQ(0, memory_datasrc.getZoneCount());
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
- EXPECT_EQ(1, memory_datasrc.getZoneCount());
+TEST_F(InMemoryClientTest, iterator) {
+ // Just some preparations of data
+ boost::shared_ptr<InMemoryZoneFinder>
+ zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+ RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+ RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+ RRTTL(300)));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+ aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+ RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+ RRTTL(300)));
+ subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+ EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+ // First, the zone is not there, so it should throw
+ EXPECT_THROW(memory_client.getIterator(Name("b")), DataSourceError);
+ // This zone is not there either, even when there's a zone containing this
+ EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+ // Now, an empty zone
+ ZoneIteratorPtr iterator(memory_client.getIterator(Name("a")));
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+ // It throws Unexpected when we are past the end
+ EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetA));
+ EXPECT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+ EXPECT_EQ(result::SUCCESS, zone->add(subRRsetA));
+ // Check it with full zone, one by one.
+ // It should be in ascending order in case of InMemory data source
+ // (isn't guaranteed in general)
+ iterator = memory_client.getIterator(Name("a"));
+ EXPECT_EQ(aRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(aRRsetAAAA, iterator->getNextRRset());
+ EXPECT_EQ(subRRsetA, iterator->getNextRRset());
+ EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
+TEST_F(InMemoryClientTest, getZoneCount) {
+ EXPECT_EQ(0, memory_client.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.com"))));
+ EXPECT_EQ(1, memory_client.getZoneCount());
// duplicate add. counter shouldn't change
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
- EXPECT_EQ(1, memory_datasrc.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.com"))));
+ EXPECT_EQ(1, memory_client.getZoneCount());
// add one more
- memory_datasrc.addZone(
- ZonePtr(new MemoryZone(rrclass, Name("example.org"))));
- EXPECT_EQ(2, memory_datasrc.getZoneCount());
+ memory_client.addZone(
+ ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+ Name("example.org"))));
+ EXPECT_EQ(2, memory_client.getZoneCount());
}
-// A helper callback of masterLoad() used in MemoryZoneTest.
+TEST_F(InMemoryClientTest, startUpdateZone) {
+ EXPECT_THROW(memory_client.getUpdater(Name("example.org"), false),
+ isc::NotImplemented);
+}
+
+// A helper callback of masterLoad() used in InMemoryZoneFinderTest.
void
setRRset(RRsetPtr rrset, vector<RRsetPtr*>::iterator& it) {
*(*it) = rrset;
++it;
}
-/// \brief Test fixture for the MemoryZone class
-class MemoryZoneTest : public ::testing::Test {
+/// \brief Test fixture for the InMemoryZoneFinder class
+class InMemoryZoneFinderTest : public ::testing::Test {
// A straightforward pair of textual RR(set) and a RRsetPtr variable
// to store the RRset. Used to build test data below.
struct RRsetData {
@@ -162,10 +218,10 @@ class MemoryZoneTest : public ::testing::Test {
RRsetPtr* rrset;
};
public:
- MemoryZoneTest() :
+ InMemoryZoneFinderTest() :
class_(RRClass::IN()),
origin_("example.org"),
- zone_(class_, origin_)
+ zone_finder_(class_, origin_)
{
// Build test RRsets. Below, we construct an RRset for
// each textual RR(s) of zone_data, and assign it to the corresponding
@@ -224,8 +280,8 @@ public:
// Some data to test with
const RRClass class_;
const Name origin_;
- // The zone to torture by tests
- MemoryZone zone_;
+ // The zone finder to torture by tests
+ InMemoryZoneFinder zone_finder_;
/*
* Some RRsets to put inside the zone.
@@ -262,9 +318,9 @@ public:
RRsetPtr rr_not_wild_another_;
/**
- * \brief Test one find query to the zone.
+ * \brief Test one find query to the zone finder.
*
- * Asks a query to the zone and checks it does not throw and returns
+ * Asks a query to the zone finder and checks it does not throw and returns
* expected results. It returns nothing, it just signals failures
* to GTEST.
*
@@ -274,29 +330,31 @@ public:
* \param check_answer Should a check against equality of the answer be
* done?
* \param answer The expected rrset, if any should be returned.
- * \param zone Check different MemoryZone object than zone_ (if NULL,
- * uses zone_)
+ * \param zone_finder Check different InMemoryZoneFinder object than
+ * zone_finder_ (if NULL, uses zone_finder_)
* \param check_wild_answer Checks that the answer has the same RRs, type
* class and TTL as the eqxpected answer and that the name corresponds
* to the one searched. It is meant for checking answers for wildcard
* queries.
*/
- void findTest(const Name& name, const RRType& rrtype, Zone::Result result,
+ void findTest(const Name& name, const RRType& rrtype,
+ ZoneFinder::Result result,
bool check_answer = true,
const ConstRRsetPtr& answer = ConstRRsetPtr(),
RRsetList* target = NULL,
- MemoryZone* zone = NULL,
- Zone::FindOptions options = Zone::FIND_DEFAULT,
+ InMemoryZoneFinder* zone_finder = NULL,
+ ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT,
bool check_wild_answer = false)
{
- if (!zone) {
- zone = &zone_;
+ if (zone_finder == NULL) {
+ zone_finder = &zone_finder_;
}
// The whole block is inside, because we need to check the result and
// we can't assign to FindResult
EXPECT_NO_THROW({
- Zone::FindResult find_result(zone->find(name, rrtype, target,
- options));
+ ZoneFinder::FindResult find_result(zone_finder->find(
+ name, rrtype,
+ target, options));
// Check it returns correct answers
EXPECT_EQ(result, find_result.code);
if (check_answer) {
@@ -337,14 +395,22 @@ public:
};
/**
- * \brief Test MemoryZone::MemoryZone constructor.
+ * \brief Check that findPreviousName throws as it should now.
+ */
+TEST_F(InMemoryZoneFinderTest, findPreviousName) {
+ EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")),
+ isc::NotImplemented);
+}
+
+/**
+ * \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor.
*
- * Takes the created zone and checks its properties they are the same
+ * Takes the created zone finder and checks its properties they are the same
* as passed parameters.
*/
-TEST_F(MemoryZoneTest, constructor) {
- ASSERT_EQ(class_, zone_.getClass());
- ASSERT_EQ(origin_, zone_.getOrigin());
+TEST_F(InMemoryZoneFinderTest, constructor) {
+ ASSERT_EQ(class_, zone_finder_.getClass());
+ ASSERT_EQ(origin_, zone_finder_.getOrigin());
}
/**
* \brief Test adding.
@@ -352,174 +418,178 @@ TEST_F(MemoryZoneTest, constructor) {
* We test that it throws at the correct moments and the correct exceptions.
* And we test the return value.
*/
-TEST_F(MemoryZoneTest, add) {
+TEST_F(InMemoryZoneFinderTest, add) {
// This one does not belong to this zone
- EXPECT_THROW(zone_.add(rr_out_), MemoryZone::OutOfZone);
+ EXPECT_THROW(zone_finder_.add(rr_out_), InMemoryZoneFinder::OutOfZone);
// Test null pointer
- EXPECT_THROW(zone_.add(ConstRRsetPtr()), MemoryZone::NullRRset);
+ EXPECT_THROW(zone_finder_.add(ConstRRsetPtr()),
+ InMemoryZoneFinder::NullRRset);
// Now put all the data we have there. It should throw nothing
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
// Try putting there something twice, it should be rejected
- EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_a_)));
}
-TEST_F(MemoryZoneTest, addMultipleCNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleCNAMEs) {
rr_cname_->addRdata(generic::CNAME("canonical2.example.org."));
- EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addCNAMEThenOther) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
- EXPECT_THROW(zone_.add(rr_cname_a_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addCNAMEThenOther) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
+ EXPECT_THROW(zone_finder_.add(rr_cname_a_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addOtherThenCNAME) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_a_));
- EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addOtherThenCNAME) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_a_));
+ EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, findCNAME) {
+TEST_F(InMemoryZoneFinderTest, findCNAME) {
// install CNAME RR
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
// Find A RR of the same. Should match the CNAME
- findTest(rr_cname_->getName(), RRType::NS(), Zone::CNAME, true, rr_cname_);
+ findTest(rr_cname_->getName(), RRType::NS(), ZoneFinder::CNAME, true,
+ rr_cname_);
// Find the CNAME itself. Should result in normal SUCCESS
- findTest(rr_cname_->getName(), RRType::CNAME(), Zone::SUCCESS, true,
+ findTest(rr_cname_->getName(), RRType::CNAME(), ZoneFinder::SUCCESS, true,
rr_cname_);
}
-TEST_F(MemoryZoneTest, findCNAMEUnderZoneCut) {
+TEST_F(InMemoryZoneFinderTest, findCNAMEUnderZoneCut) {
// There's nothing special when we find a CNAME under a zone cut
// (with FIND_GLUE_OK). The behavior is different from BIND 9,
// so we test this case explicitly.
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
RRsetPtr rr_cname_under_cut_(new RRset(Name("cname.child.example.org"),
class_, RRType::CNAME(),
RRTTL(300)));
- EXPECT_EQ(SUCCESS, zone_.add(rr_cname_under_cut_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_under_cut_));
findTest(Name("cname.child.example.org"), RRType::AAAA(),
- Zone::CNAME, true, rr_cname_under_cut_, NULL, NULL,
- Zone::FIND_GLUE_OK);
+ ZoneFinder::CNAME, true, rr_cname_under_cut_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
// Two DNAMEs at single domain are disallowed by RFC 2672, section 3)
// Having a CNAME there is disallowed too, but it is tested by
// addOtherThenCNAME and addCNAMEThenOther.
-TEST_F(MemoryZoneTest, addMultipleDNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleDNAMEs) {
rr_dname_->addRdata(generic::DNAME("target2.example.org."));
- EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
}
/*
* These two tests ensure that we can't have DNAME and NS at the same
* node with the exception of the apex of zone (forbidden by RFC 2672)
*/
-TEST_F(MemoryZoneTest, addDNAMEThenNS) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- EXPECT_THROW(zone_.add(rr_dname_ns_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addDNAMEThenNS) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ EXPECT_THROW(zone_finder_.add(rr_dname_ns_), InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, addNSThenDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_ns_)));
- EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addNSThenDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_ns_)));
+ EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
}
// It is allowed to have NS and DNAME at apex
-TEST_F(MemoryZoneTest, DNAMEAndNSAtApex) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+TEST_F(InMemoryZoneFinderTest, DNAMEAndNSAtApex) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
// The NS should be possible to be found, below should be DNAME, not
// delegation
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
- findTest(rr_child_ns_->getName(), RRType::A(), Zone::DNAME, true,
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+ findTest(rr_child_ns_->getName(), RRType::A(), ZoneFinder::DNAME, true,
rr_dname_apex_);
}
-TEST_F(MemoryZoneTest, NSAndDNAMEAtApex) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
+TEST_F(InMemoryZoneFinderTest, NSAndDNAMEAtApex) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
}
// TODO: Test (and implement) adding data under DNAME. That is forbidden by
// 2672 as well.
// Search under a DNAME record. It should return the DNAME
-TEST_F(MemoryZoneTest, findBelowDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- findTest(Name("below.dname.example.org"), RRType::A(), Zone::DNAME, true,
- rr_dname_);
+TEST_F(InMemoryZoneFinderTest, findBelowDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ findTest(Name("below.dname.example.org"), RRType::A(), ZoneFinder::DNAME,
+ true, rr_dname_);
}
// Search at the domain with DNAME. It should act as DNAME isn't there, DNAME
// influences only the data below (see RFC 2672, section 3)
-TEST_F(MemoryZoneTest, findAtDNAME) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_a_)));
+TEST_F(InMemoryZoneFinderTest, findAtDNAME) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_a_)));
const Name dname_name(rr_dname_->getName());
- findTest(dname_name, RRType::A(), Zone::SUCCESS, true, rr_dname_a_);
- findTest(dname_name, RRType::DNAME(), Zone::SUCCESS, true, rr_dname_);
- findTest(dname_name, RRType::TXT(), Zone::NXRRSET, true);
+ findTest(dname_name, RRType::A(), ZoneFinder::SUCCESS, true, rr_dname_a_);
+ findTest(dname_name, RRType::DNAME(), ZoneFinder::SUCCESS, true,
+ rr_dname_);
+ findTest(dname_name, RRType::TXT(), ZoneFinder::NXRRSET, true);
}
// Try searching something that is both under NS and DNAME, without and with
// GLUE_OK mode (it should stop at the NS and DNAME respectively).
-TEST_F(MemoryZoneTest, DNAMEUnderNS) {
- zone_.add(rr_child_ns_);
- zone_.add(rr_child_dname_);
+TEST_F(InMemoryZoneFinderTest, DNAMEUnderNS) {
+ zone_finder_.add(rr_child_ns_);
+ zone_finder_.add(rr_child_dname_);
Name lowName("below.dname.child.example.org.");
- findTest(lowName, RRType::A(), Zone::DELEGATION, true, rr_child_ns_);
- findTest(lowName, RRType::A(), Zone::DNAME, true, rr_child_dname_, NULL,
- NULL, Zone::FIND_GLUE_OK);
+ findTest(lowName, RRType::A(), ZoneFinder::DELEGATION, true, rr_child_ns_);
+ findTest(lowName, RRType::A(), ZoneFinder::DNAME, true, rr_child_dname_,
+ NULL, NULL, ZoneFinder::FIND_GLUE_OK);
}
// Test adding child zones and zone cut handling
-TEST_F(MemoryZoneTest, delegationNS) {
+TEST_F(InMemoryZoneFinderTest, delegationNS) {
// add in-zone data
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
// install a zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// below the zone cut
- findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_);
+ findTest(Name("www.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
// at the zone cut
- findTest(Name("child.example.org"), RRType::A(), Zone::DELEGATION,
+ findTest(Name("child.example.org"), RRType::A(), ZoneFinder::DELEGATION,
true, rr_child_ns_);
- findTest(Name("child.example.org"), RRType::NS(), Zone::DELEGATION,
+ findTest(Name("child.example.org"), RRType::NS(), ZoneFinder::DELEGATION,
true, rr_child_ns_);
// finding NS for the apex (origin) node. This must not be confused
// with delegation due to the existence of an NS RR.
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
// unusual case of "nested delegation": the highest cut should be used.
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
findTest(Name("www.grand.child.example.org"), RRType::A(),
- Zone::DELEGATION, true, rr_child_ns_); // note: !rr_grandchild_ns_
+ // note: !rr_grandchild_ns_
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
}
-TEST_F(MemoryZoneTest, findAny) {
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+TEST_F(InMemoryZoneFinderTest, findAny) {
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
// origin
RRsetList origin_rrsets;
- findTest(origin_, RRType::ANY(), Zone::SUCCESS, true,
+ findTest(origin_, RRType::ANY(), ZoneFinder::SUCCESS, true,
ConstRRsetPtr(), &origin_rrsets);
EXPECT_EQ(2, origin_rrsets.size());
EXPECT_EQ(rr_a_, origin_rrsets.findRRset(RRType::A(), RRClass::IN()));
@@ -527,13 +597,13 @@ TEST_F(MemoryZoneTest, findAny) {
// out zone name
RRsetList out_rrsets;
- findTest(Name("example.com"), RRType::ANY(), Zone::NXDOMAIN, true,
+ findTest(Name("example.com"), RRType::ANY(), ZoneFinder::NXDOMAIN, true,
ConstRRsetPtr(), &out_rrsets);
EXPECT_EQ(0, out_rrsets.size());
RRsetList glue_child_rrsets;
- findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::SUCCESS, true,
- ConstRRsetPtr(), &glue_child_rrsets);
+ findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::SUCCESS,
+ true, ConstRRsetPtr(), &glue_child_rrsets);
EXPECT_EQ(rr_child_glue_, glue_child_rrsets.findRRset(RRType::A(),
RRClass::IN()));
EXPECT_EQ(1, glue_child_rrsets.size());
@@ -542,59 +612,60 @@ TEST_F(MemoryZoneTest, findAny) {
// been implemented
// add zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// zone cut
RRsetList child_rrsets;
- findTest(rr_child_ns_->getName(), RRType::ANY(), Zone::DELEGATION, true,
- rr_child_ns_, &child_rrsets);
+ findTest(rr_child_ns_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_, &child_rrsets);
EXPECT_EQ(0, child_rrsets.size());
// glue for this zone cut
RRsetList new_glue_child_rrsets;
- findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::DELEGATION, true,
- rr_child_ns_, &new_glue_child_rrsets);
+ findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_, &new_glue_child_rrsets);
EXPECT_EQ(0, new_glue_child_rrsets.size());
}
-TEST_F(MemoryZoneTest, glue) {
+TEST_F(InMemoryZoneFinderTest, glue) {
// install zone data:
// a zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
// glue for this cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
// a nested zone cut (unusual)
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
// glue under the deeper zone cut
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_glue_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_glue_)));
// by default glue is hidden due to the zone cut
- findTest(rr_child_glue_->getName(), RRType::A(), Zone::DELEGATION, true,
- rr_child_ns_);
+ findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::DELEGATION,
+ true, rr_child_ns_);
// If we do it in the "glue OK" mode, we should find the exact match.
- findTest(rr_child_glue_->getName(), RRType::A(), Zone::SUCCESS, true,
- rr_child_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+ rr_child_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// glue OK + NXRRSET case
- findTest(rr_child_glue_->getName(), RRType::AAAA(), Zone::NXRRSET, true,
- ConstRRsetPtr(), NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(rr_child_glue_->getName(), RRType::AAAA(), ZoneFinder::NXRRSET,
+ true, ConstRRsetPtr(), NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// glue OK + NXDOMAIN case
- findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(Name("www.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
// nested cut case. The glue should be found.
findTest(rr_grandchild_glue_->getName(), RRType::AAAA(),
- Zone::SUCCESS,
- true, rr_grandchild_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+ ZoneFinder::SUCCESS,
+ true, rr_grandchild_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
// A non-existent name in nested cut. This should result in delegation
// at the highest zone cut.
findTest(Name("www.grand.child.example.org"), RRType::TXT(),
- Zone::DELEGATION, true, rr_child_ns_, NULL, NULL,
- Zone::FIND_GLUE_OK);
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
/**
@@ -604,28 +675,29 @@ TEST_F(MemoryZoneTest, glue) {
* \todo This doesn't do any kind of CNAME and so on. If it isn't
* directly there, it just tells it doesn't exist.
*/
-TEST_F(MemoryZoneTest, find) {
+TEST_F(InMemoryZoneFinderTest, find) {
// Fill some data inside
// Now put all the data we have there. It should throw nothing
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
- EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+ EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
// These two should be successful
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
- findTest(rr_ns_a_->getName(), RRType::A(), Zone::SUCCESS, true, rr_ns_a_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+ findTest(rr_ns_a_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+ rr_ns_a_);
// These domain exist but don't have the provided RRType
- findTest(origin_, RRType::AAAA(), Zone::NXRRSET);
- findTest(rr_ns_a_->getName(), RRType::NS(), Zone::NXRRSET);
+ findTest(origin_, RRType::AAAA(), ZoneFinder::NXRRSET);
+ findTest(rr_ns_a_->getName(), RRType::NS(), ZoneFinder::NXRRSET);
// These domains don't exist (and one is out of the zone)
- findTest(Name("nothere.example.org"), RRType::A(), Zone::NXDOMAIN);
- findTest(Name("example.net"), RRType::A(), Zone::NXDOMAIN);
+ findTest(Name("nothere.example.org"), RRType::A(), ZoneFinder::NXDOMAIN);
+ findTest(Name("example.net"), RRType::A(), ZoneFinder::NXDOMAIN);
}
-TEST_F(MemoryZoneTest, emptyNode) {
+TEST_F(InMemoryZoneFinderTest, emptyNode) {
/*
* The backend RBTree for this test should look like as follows:
* example.org
@@ -645,52 +717,53 @@ TEST_F(MemoryZoneTest, emptyNode) {
for (int i = 0; names[i] != NULL; ++i) {
ConstRRsetPtr rrset(new RRset(Name(names[i]), class_, RRType::A(),
RRTTL(300)));
- EXPECT_EQ(SUCCESS, zone_.add(rrset));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rrset));
}
// empty node matching, easy case: the node for 'baz' exists with
// no data.
- findTest(Name("baz.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("baz.example.org"), RRType::A(), ZoneFinder::NXRRSET);
// empty node matching, a trickier case: the node for 'foo' is part of
// "x.foo", which should be considered an empty node.
- findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
// "org" is contained in "example.org", but it shouldn't be treated as
// NXRRSET because it's out of zone.
// Note: basically we don't expect such a query to be performed (the common
// operation is to identify the best matching zone first then perform
// search it), but we shouldn't be confused even in the unexpected case.
- findTest(Name("org"), RRType::A(), Zone::NXDOMAIN);
+ findTest(Name("org"), RRType::A(), ZoneFinder::NXDOMAIN);
}
-TEST_F(MemoryZoneTest, load) {
+TEST_F(InMemoryZoneFinderTest, load) {
// Put some data inside the zone
- EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_.add(rr_ns_)));
+ EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_finder_.add(rr_ns_)));
// Loading with different origin should fail
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+ MasterLoadError);
// See the original data is still there, survived the exception
- findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
// Create correct zone
- MemoryZone rootzone(class_, Name("."));
+ InMemoryZoneFinder rootzone(class_, Name("."));
// Try putting something inside
EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, rootzone.add(rr_ns_aaaa_)));
// Load the zone. It should overwrite/remove the above RRset
EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
// Now see there are some rrsets (we don't look inside, though)
- findTest(Name("."), RRType::SOA(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &rootzone);
- findTest(Name("."), RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &rootzone);
- findTest(Name("a.root-servers.net."), RRType::A(), Zone::SUCCESS, false,
- ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("."), RRType::SOA(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("."), RRType::NS(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &rootzone);
+ findTest(Name("a.root-servers.net."), RRType::A(), ZoneFinder::SUCCESS,
+ false, ConstRRsetPtr(), NULL, &rootzone);
// But this should no longer be here
- findTest(rr_ns_a_->getName(), RRType::AAAA(), Zone::NXDOMAIN, true,
+ findTest(rr_ns_a_->getName(), RRType::AAAA(), ZoneFinder::NXDOMAIN, true,
ConstRRsetPtr(), NULL, &rootzone);
// Try loading zone that is wrong in a different way
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
MasterLoadError);
}
@@ -698,7 +771,7 @@ TEST_F(MemoryZoneTest, load) {
* Test that puts a (simple) wildcard into the zone and checks we can
* correctly find the data.
*/
-TEST_F(MemoryZoneTest, wildcard) {
+TEST_F(InMemoryZoneFinderTest, wildcard) {
/*
* example.org.
* |
@@ -706,40 +779,41 @@ TEST_F(MemoryZoneTest, wildcard) {
* |
* *
*/
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
// Search at the parent. The parent will not have the A, but it will
// be in the wildcard (so check the wildcard isn't matched at the parent)
{
SCOPED_TRACE("Search at parrent");
- findTest(Name("wild.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("wild.example.org"), RRType::A(), ZoneFinder::NXRRSET);
}
// Search the original name of wildcard
{
SCOPED_TRACE("Search directly at *");
- findTest(Name("*.wild.example.org"), RRType::A(), Zone::SUCCESS, true,
- rr_wild_);
+ findTest(Name("*.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+ true, rr_wild_);
}
// Search "created" name.
{
SCOPED_TRACE("Search at created child");
- findTest(Name("a.wild.example.org"), RRType::A(), Zone::SUCCESS, false,
- rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name("a.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+ false, rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
}
// Search another created name, this time little bit lower
{
SCOPED_TRACE("Search at created grand-child");
- findTest(Name("a.b.wild.example.org"), RRType::A(), Zone::SUCCESS,
- false, rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name("a.b.wild.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, false, rr_wild_, NULL, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
}
- EXPECT_EQ(SUCCESS, zone_.add(rr_under_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_under_wild_));
{
SCOPED_TRACE("Search under non-wildcard");
findTest(Name("bar.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
}
}
@@ -750,33 +824,34 @@ TEST_F(MemoryZoneTest, wildcard) {
* - When the query is in another zone. That is, delegation cancels
* the wildcard defaults."
*/
-TEST_F(MemoryZoneTest, delegatedWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_wild_));
- EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+TEST_F(InMemoryZoneFinderTest, delegatedWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
{
SCOPED_TRACE("Looking under delegation point");
- findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_);
+ findTest(Name("a.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_);
}
{
SCOPED_TRACE("Looking under delegation point in GLUE_OK mode");
- findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
- true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+ findTest(Name("a.child.example.org"), RRType::A(),
+ ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+ ZoneFinder::FIND_GLUE_OK);
}
}
// Tests combination of wildcard and ANY.
-TEST_F(MemoryZoneTest, anyWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+TEST_F(InMemoryZoneFinderTest, anyWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
// First try directly the name (normal match)
{
SCOPED_TRACE("Asking direcly for *");
RRsetList target;
- findTest(Name("*.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
- true, ConstRRsetPtr(), &target);
+ findTest(Name("*.wild.example.org"), RRType::ANY(),
+ ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
ASSERT_EQ(1, target.size());
EXPECT_EQ(RRType::A(), (*target.begin())->getType());
EXPECT_EQ(Name("*.wild.example.org"), (*target.begin())->getName());
@@ -786,8 +861,8 @@ TEST_F(MemoryZoneTest, anyWildcard) {
{
SCOPED_TRACE("Asking in the wild way");
RRsetList target;
- findTest(Name("a.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
- true, ConstRRsetPtr(), &target);
+ findTest(Name("a.wild.example.org"), RRType::ANY(),
+ ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
ASSERT_EQ(1, target.size());
EXPECT_EQ(RRType::A(), (*target.begin())->getType());
EXPECT_EQ(Name("a.wild.example.org"), (*target.begin())->getName());
@@ -796,56 +871,56 @@ TEST_F(MemoryZoneTest, anyWildcard) {
// Test there's nothing in the wildcard in the middle if we load
// wild.*.foo.example.org.
-TEST_F(MemoryZoneTest, emptyWildcard) {
+TEST_F(InMemoryZoneFinderTest, emptyWildcard) {
/*
* example.org.
* foo
* *
* wild
*/
- EXPECT_EQ(SUCCESS, zone_.add(rr_emptywild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_emptywild_));
{
SCOPED_TRACE("Asking for the original record under wildcard");
- findTest(Name("wild.*.foo.example.org"), RRType::A(), Zone::SUCCESS,
- true, rr_emptywild_);
+ findTest(Name("wild.*.foo.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, true, rr_emptywild_);
}
{
SCOPED_TRACE("Asking for A record");
- findTest(Name("a.foo.example.org"), RRType::A(), Zone::NXRRSET);
- findTest(Name("*.foo.example.org"), RRType::A(), Zone::NXRRSET);
- findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("a.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+ findTest(Name("*.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+ findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
}
{
SCOPED_TRACE("Asking for ANY record");
RRsetList normalTarget;
- findTest(Name("*.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
- ConstRRsetPtr(), &normalTarget);
+ findTest(Name("*.foo.example.org"), RRType::ANY(), ZoneFinder::NXRRSET,
+ true, ConstRRsetPtr(), &normalTarget);
EXPECT_EQ(0, normalTarget.size());
RRsetList wildTarget;
- findTest(Name("a.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
- ConstRRsetPtr(), &wildTarget);
+ findTest(Name("a.foo.example.org"), RRType::ANY(),
+ ZoneFinder::NXRRSET, true, ConstRRsetPtr(), &wildTarget);
EXPECT_EQ(0, wildTarget.size());
}
{
SCOPED_TRACE("Asking on the non-terminal");
findTest(Name("wild.bar.foo.example.org"), RRType::A(),
- Zone::NXRRSET);
+ ZoneFinder::NXRRSET);
}
}
// Same as emptyWildcard, but with multiple * in the path.
-TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_nested_emptywild_));
+TEST_F(InMemoryZoneFinderTest, nestedEmptyWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_nested_emptywild_));
{
SCOPED_TRACE("Asking for the original record under wildcards");
findTest(Name("wild.*.foo.*.bar.example.org"), RRType::A(),
- Zone::SUCCESS, true, rr_nested_emptywild_);
+ ZoneFinder::SUCCESS, true, rr_nested_emptywild_);
}
{
@@ -860,7 +935,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+ findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
}
}
@@ -878,7 +953,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+ findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
}
}
@@ -889,7 +964,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
SCOPED_TRACE(string("Node ") + *name);
RRsetList target;
- findTest(Name(*name), RRType::ANY(), Zone::NXRRSET, true,
+ findTest(Name(*name), RRType::ANY(), ZoneFinder::NXRRSET, true,
ConstRRsetPtr(), &target);
EXPECT_EQ(0, target.size());
}
@@ -899,21 +974,21 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
// We run this part twice from the below test, in two slightly different
// situations
void
-MemoryZoneTest::doCancelWildcardTest() {
+InMemoryZoneFinderTest::doCancelWildcardTest() {
// These should be canceled
{
SCOPED_TRACE("Canceled under foo.wild.example.org");
findTest(Name("aaa.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
findTest(Name("zzz.foo.wild.example.org"), RRType::A(),
- Zone::NXDOMAIN);
+ ZoneFinder::NXDOMAIN);
}
// This is existing, non-wildcard domain, shouldn't wildcard at all
{
SCOPED_TRACE("Existing domain under foo.wild.example.org");
- findTest(Name("bar.foo.wild.example.org"), RRType::A(), Zone::SUCCESS,
- true, rr_not_wild_);
+ findTest(Name("bar.foo.wild.example.org"), RRType::A(),
+ ZoneFinder::SUCCESS, true, rr_not_wild_);
}
// These should be caught by the wildcard
@@ -930,15 +1005,16 @@ MemoryZoneTest::doCancelWildcardTest() {
for (const char** name(names); *name != NULL; ++ name) {
SCOPED_TRACE(string("Node ") + *name);
- findTest(Name(*name), RRType::A(), Zone::SUCCESS, false, rr_wild_,
- NULL, NULL, Zone::FIND_DEFAULT, true);
+ findTest(Name(*name), RRType::A(), ZoneFinder::SUCCESS, false,
+ rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
}
}
// This shouldn't be wildcarded, it's an existing domain
{
SCOPED_TRACE("The foo.wild.example.org itself");
- findTest(Name("foo.wild.example.org"), RRType::A(), Zone::NXRRSET);
+ findTest(Name("foo.wild.example.org"), RRType::A(),
+ ZoneFinder::NXRRSET);
}
}
@@ -952,9 +1028,9 @@ MemoryZoneTest::doCancelWildcardTest() {
* Tests few cases "around" the canceled wildcard match, to see something that
* shouldn't be canceled isn't.
*/
-TEST_F(MemoryZoneTest, cancelWildcard) {
- EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
- EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_));
+TEST_F(InMemoryZoneFinderTest, cancelWildcard) {
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_));
{
SCOPED_TRACE("Runnig with single entry under foo.wild.example.org");
@@ -964,61 +1040,63 @@ TEST_F(MemoryZoneTest, cancelWildcard) {
// Try putting another one under foo.wild....
// The result should be the same but it will be done in another way in the
// code, because the foo.wild.example.org will exist in the tree.
- EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_another_));
+ EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_another_));
{
SCOPED_TRACE("Runnig with two entries under foo.wild.example.org");
doCancelWildcardTest();
}
}
-TEST_F(MemoryZoneTest, loadBadWildcard) {
+TEST_F(InMemoryZoneFinderTest, loadBadWildcard) {
// We reject loading the zone if it contains a wildcard name for
// NS or DNAME.
- EXPECT_THROW(zone_.add(rr_nswild_), MemoryZone::AddError);
- EXPECT_THROW(zone_.add(rr_dnamewild_), MemoryZone::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_nswild_), InMemoryZoneFinder::AddError);
+ EXPECT_THROW(zone_finder_.add(rr_dnamewild_),
+ InMemoryZoneFinder::AddError);
}
-TEST_F(MemoryZoneTest, swap) {
- // build one zone with some data
- MemoryZone zone1(class_, origin_);
- EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_));
- EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_aaaa_));
+TEST_F(InMemoryZoneFinderTest, swap) {
+ // build one zone finder with some data
+ InMemoryZoneFinder finder1(class_, origin_);
+ EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_));
+ EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_aaaa_));
- // build another zone of a different RR class with some other data
+ // build another zone finder of a different RR class with some other data
const Name other_origin("version.bind");
ASSERT_NE(origin_, other_origin); // make sure these two are different
- MemoryZone zone2(RRClass::CH(), other_origin);
+ InMemoryZoneFinder finder2(RRClass::CH(), other_origin);
EXPECT_EQ(result::SUCCESS,
- zone2.add(RRsetPtr(new RRset(Name("version.bind"),
+ finder2.add(RRsetPtr(new RRset(Name("version.bind"),
RRClass::CH(), RRType::TXT(),
RRTTL(0)))));
- zone1.swap(zone2);
- EXPECT_EQ(other_origin, zone1.getOrigin());
- EXPECT_EQ(origin_, zone2.getOrigin());
- EXPECT_EQ(RRClass::CH(), zone1.getClass());
- EXPECT_EQ(RRClass::IN(), zone2.getClass());
+ finder1.swap(finder2);
+ EXPECT_EQ(other_origin, finder1.getOrigin());
+ EXPECT_EQ(origin_, finder2.getOrigin());
+ EXPECT_EQ(RRClass::CH(), finder1.getClass());
+ EXPECT_EQ(RRClass::IN(), finder2.getClass());
// make sure the zone data is swapped, too
- findTest(origin_, RRType::NS(), Zone::NXDOMAIN, false, ConstRRsetPtr(),
- NULL, &zone1);
- findTest(other_origin, RRType::TXT(), Zone::SUCCESS, false,
- ConstRRsetPtr(), NULL, &zone1);
- findTest(origin_, RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
- NULL, &zone2);
- findTest(other_origin, RRType::TXT(), Zone::NXDOMAIN, false,
- ConstRRsetPtr(), NULL, &zone2);
+ findTest(origin_, RRType::NS(), ZoneFinder::NXDOMAIN, false,
+ ConstRRsetPtr(), NULL, &finder1);
+ findTest(other_origin, RRType::TXT(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &finder1);
+ findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, false,
+ ConstRRsetPtr(), NULL, &finder2);
+ findTest(other_origin, RRType::TXT(), ZoneFinder::NXDOMAIN, false,
+ ConstRRsetPtr(), NULL, &finder2);
}
-TEST_F(MemoryZoneTest, getFileName) {
+TEST_F(InMemoryZoneFinderTest, getFileName) {
// for an empty zone the file name should also be empty.
- EXPECT_TRUE(zone_.getFileName().empty());
+ EXPECT_TRUE(zone_finder_.getFileName().empty());
// if loading a zone fails the file name shouldn't be set.
- EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
- EXPECT_TRUE(zone_.getFileName().empty());
+ EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+ MasterLoadError);
+ EXPECT_TRUE(zone_finder_.getFileName().empty());
// after a successful load, the specified file name should be set
- MemoryZone rootzone(class_, Name("."));
+ InMemoryZoneFinder rootzone(class_, Name("."));
EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
// overriding load, which will fail
@@ -1028,9 +1106,8 @@ TEST_F(MemoryZoneTest, getFileName) {
EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
// After swap, file names should also be swapped.
- zone_.swap(rootzone);
- EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_.getFileName());
+ zone_finder_.swap(rootzone);
+ EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_finder_.getFileName());
EXPECT_TRUE(rootzone.getFileName().empty());
}
-
}
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..3974977
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,773 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+
+#include <datasrc/sqlite3_accessor.h>
+
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+#include <boost/scoped_ptr.hpp>
+#include <fstream>
+#include <sqlite3.h>
+
+using namespace std;
+using namespace isc::datasrc;
+using boost::shared_ptr;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+// The following file must be non existent and must be non"creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The "nodir", a non existent directory, is inserted for this purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// new db file, we don't need this to be a std::string, and given the
+// raw calls we use it in a const char* is more convenient
+const char* SQLITE_NEW_DBFILE = TEST_DATA_BUILDDIR "/newdb.sqlite3";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE,
+ RRClass::IN()));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST,
+ RRClass::IN()), SQLite3Error);
+}
+
+// It rejects broken DB
+TEST(SQLite3Open, brokenDB) {
+ EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB,
+ RRClass::IN()), SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+ EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY,
+ RRClass::IN()));
+}
+
+// Test fixture for querying the db
+class SQLite3AccessorTest : public ::testing::Test {
+public:
+ SQLite3AccessorTest() {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::IN());
+ }
+ // So it can be re-created with different data
+ void initAccessor(const std::string& filename, const RRClass& rrclass) {
+ accessor.reset(new SQLite3Accessor(filename, rrclass));
+ }
+ // The tested accessor
+ boost::shared_ptr<SQLite3Accessor> accessor;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3AccessorTest, getZone) {
+ std::pair<bool, int> result(accessor->getZone("example.com."));
+ EXPECT_TRUE(result.first);
+ EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3AccessorTest, subZone) {
+ EXPECT_FALSE(accessor->getZone("sub.example.com.").first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3AccessorTest, noZone) {
+ EXPECT_FALSE(accessor->getZone("example.org.").first);
+}
+
+// This zone is there, but in different class
+TEST_F(SQLite3AccessorTest, noClass) {
+ initAccessor(SQLITE_DBFILE_EXAMPLE, RRClass::CH());
+ EXPECT_FALSE(accessor->getZone("example.com.").first);
+}
+
+// This tests the iterator context
+TEST_F(SQLite3AccessorTest, iterator) {
+ // Our test zone is conveniently small, but not empty
+ initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, RRClass::IN());
+
+ const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+ ASSERT_TRUE(zone_info.first);
+
+ // Get the iterator context
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getAllRecords(zone_info.second));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
+
+ std::string data[DatabaseAccessor::COLUMN_COUNT];
+ // Get and check the first and only record
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200",
+ data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ EXPECT_TRUE(context->getNext(data));
+ EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+
+ // Check there's no other
+ EXPECT_FALSE(context->getNext(data));
+
+ // And make sure calling it again won't cause problems.
+ EXPECT_FALSE(context->getNext(data));
+}
+
+TEST(SQLite3Open, getDBNameExample2) {
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, accessor.getDBName());
+}
+
+TEST(SQLite3Open, getDBNameExampleROOT) {
+ SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, RRClass::IN());
+ EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
+}
+
+// Simple function to cound the number of records for
+// any name
+void
+checkRecordRow(const std::string columns[],
+ const std::string& field0,
+ const std::string& field1,
+ const std::string& field2,
+ const std::string& field3,
+ const std::string& field4)
+{
+ EXPECT_EQ(field0, columns[DatabaseAccessor::TYPE_COLUMN]);
+ EXPECT_EQ(field1, columns[DatabaseAccessor::TTL_COLUMN]);
+ EXPECT_EQ(field2, columns[DatabaseAccessor::SIGTYPE_COLUMN]);
+ EXPECT_EQ(field3, columns[DatabaseAccessor::RDATA_COLUMN]);
+ EXPECT_EQ(field4, columns[DatabaseAccessor::NAME_COLUMN]);
+}
+
+TEST_F(SQLite3AccessorTest, getRecords) {
+ const std::pair<bool, int> zone_info(accessor->getZone("example.com."));
+ ASSERT_TRUE(zone_info.first);
+
+ const int zone_id = zone_info.second;
+ ASSERT_EQ(1, zone_id);
+
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor->getRecords("foo.bar", 1));
+ ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+ context);
+ EXPECT_FALSE(context->getNext(columns));
+ checkRecordRow(columns, "", "", "", "", "");
+
+ // now try some real searches
+ context = accessor->getRecords("foo.example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "CNAME", "3600", "",
+ "cnametest.example.org.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "CNAME",
+ "CNAME 5 3 3600 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "mail.example.com. CNAME RRSIG NSEC", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+
+ // with no more records, the array should not have been modified
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+ "example.com. FAKEFAKEFAKEFAKE", "");
+
+ context = accessor->getRecords("example.com.", zone_id);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "SOA", "3600", "",
+ "master.example.com. admin.example.com. "
+ "1234 3600 1800 2419200 7200", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "SOA",
+ "SOA 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "NS",
+ "NS 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "MX", "3600", "",
+ "20 mail.subzone.example.com.", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "MX",
+ "MX 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "NSEC", "7200", "",
+ "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+ "NSEC 5 2 7200 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
+ "cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
+ "FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
+ "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "DNSKEY", "3600", "",
+ "257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
+ "62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
+ "NT+bBZdtV+NujSikhd THb4FYLg2b3Cx9NyJvAVukHp/91HnWu"
+ "G4T36 CzAFrfPwsHIrBz9BsaIQ21VRkcmj7DswfI/i DGd8j6b"
+ "qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
+ "fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
+ "Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
+ "rsjcKZZj660b1M=", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "4456 example.com. FAKEFAKEFAKEFAKE", "");
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // getNext() returning false should mean the array is not altered
+ checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+ "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+ "33495 example.com. FAKEFAKEFAKEFAKE", "");
+
+ // check that another getNext does not cause problems
+ EXPECT_FALSE(context->getNext(columns));
+
+ // Try searching for subdomain
+ // There's foo.bar.example.com in the data
+ context = accessor->getRecords("bar.example.com.", zone_id, true);
+ ASSERT_TRUE(context->getNext(columns));
+ checkRecordRow(columns, "A", "3600", "", "192.0.2.1", "");
+ EXPECT_FALSE(context->getNext(columns));
+ // But we shouldn't match mix.example.com here
+ context = accessor->getRecords("ix.example.com.", zone_id, true);
+ EXPECT_FALSE(context->getNext(columns));
+}
+
+TEST_F(SQLite3AccessorTest, findPrevious) {
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns01x."));
+ // Largest name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "com.example.wwww"));
+ // Out of zone after the last name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "org.example."));
+ // Case insensitive?
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS01X."));
+ // The DB contains foo.bar.example.com., which would be in between
+ // these two names. However, that one does not have an NSEC record,
+ // which is how this database recognizes glue data, so it should
+ // be skipped.
+ EXPECT_EQ("example.com.",
+ accessor->findPreviousName(1, "com.example.cname-ext."));
+ // Throw when we are before the origin
+ EXPECT_THROW(accessor->findPreviousName(1, "com.example."),
+ isc::NotImplemented);
+ EXPECT_THROW(accessor->findPreviousName(1, "a.example."),
+ isc::NotImplemented);
+}
+
+TEST_F(SQLite3AccessorTest, findPreviousNoData) {
+ // This one doesn't hold any NSEC records, so it shouldn't work
+ // The underlying DB/data don't support DNSSEC, so it's not implemented
+ // (does it make sense? Or different exception here?)
+ EXPECT_THROW(accessor->findPreviousName(3, "com.example.sql2.www."),
+ isc::NotImplemented);
+}
+
+// Test fixture for creating a db that automatically deletes it before start,
+// and when done
+class SQLite3Create : public ::testing::Test {
+public:
+ SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+
+ ~SQLite3Create() {
+ remove(SQLITE_NEW_DBFILE);
+ }
+};
+
+bool isReadable(const char* filename) {
+ return (std::ifstream(filename).is_open());
+}
+
+TEST_F(SQLite3Create, creationtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+ // Should simply be created
+ SQLite3Accessor accessor(SQLITE_NEW_DBFILE, RRClass::IN());
+ ASSERT_TRUE(isReadable(SQLITE_NEW_DBFILE));
+}
+
+TEST_F(SQLite3Create, emptytest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+ // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+
+ // empty, but not locked, so creating it now should work
+ SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN());
+
+ sqlite3_close(db);
+
+ // should work now that we closed it
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
+}
+
+TEST_F(SQLite3Create, lockedtest) {
+ ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+ // open one manually
+ sqlite3* db;
+ ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+ sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL, NULL);
+
+ // should not be able to open it
+ EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, RRClass::IN()),
+ SQLite3Error);
+
+ sqlite3_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+
+ // should work now that the exclusive lock was released by the rollback
+ SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, RRClass::IN());
+}
+
+TEST_F(SQLite3AccessorTest, clone) {
+ shared_ptr<DatabaseAccessor> cloned = accessor->clone();
+ EXPECT_EQ(accessor->getDBName(), cloned->getDBName());
+
+ // The cloned accessor should have a separate connection and search
+ // context, so it should be able to perform search in concurrent with
+ // the original accessor.
+ string columns1[DatabaseAccessor::COLUMN_COUNT];
+ string columns2[DatabaseAccessor::COLUMN_COUNT];
+
+ const std::pair<bool, int> zone_info1(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator1 =
+ accessor->getRecords("foo.example.com.", zone_info1.second);
+ const std::pair<bool, int> zone_info2(
+ accessor->getZone("example.com."));
+ DatabaseAccessor::IteratorContextPtr iterator2 =
+ cloned->getRecords("foo.example.com.", zone_info2.second);
+
+ ASSERT_TRUE(iterator1->getNext(columns1));
+ checkRecordRow(columns1, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+
+ ASSERT_TRUE(iterator2->getNext(columns2));
+ checkRecordRow(columns2, "CNAME", "3600", "", "cnametest.example.org.",
+ "");
+}
+
+//
+// Commonly used data for update tests
+//
+const char* const common_expected_data[] = {
+ // Test record already stored in the tested sqlite3 DB file.
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const new_data[] = {
+ // Newly added data commonly used by some of the tests below
+ "newdata.example.com.", "com.example.newdata.", "3600", "A", "",
+ "192.0.2.1"
+};
+const char* const deleted_data[] = {
+ // Existing data to be removed commonly used by some of the tests below
+ "foo.bar.example.com.", "A", "192.0.2.1"
+};
+
+class SQLite3Update : public SQLite3AccessorTest {
+protected:
+ SQLite3Update() {
+ // Note: if "installing" the test file fails some of the subsequent
+ // tests would fail.
+ const char *install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ "/test.sqlite3 " TEST_DATA_BUILDDIR
+ "/test.sqlite3.copied";
+ if (system(install_cmd) != 0) {
+ // any exception will do, this is failure in test setup, but nice
+ // to show the command that fails, and shouldn't be caught
+ isc_throw(isc::Exception,
+ "Error setting up; command failed: " << install_cmd);
+ };
+ initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", RRClass::IN());
+ zone_id = accessor->getZone("example.com.").second;
+ another_accessor.reset(new SQLite3Accessor(
+ TEST_DATA_BUILDDIR "/test.sqlite3.copied",
+ RRClass::IN()));
+ expected_stored.push_back(common_expected_data);
+ }
+
+ int zone_id;
+ std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
+ std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
+ std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+
+ vector<const char* const*> expected_stored; // placeholder for checkRecords
+ vector<const char* const*> empty_stored; // indicate no corresponding data
+
+ // Another accessor, emulating one running on a different process/thread
+ shared_ptr<SQLite3Accessor> another_accessor;
+ DatabaseAccessor::IteratorContextPtr iterator;
+};
+
+void
+checkRecords(SQLite3Accessor& accessor, int zone_id, const std::string& name,
+ vector<const char* const*> expected_rows)
+{
+ DatabaseAccessor::IteratorContextPtr iterator =
+ accessor.getRecords(name, zone_id);
+ std::string columns[DatabaseAccessor::COLUMN_COUNT];
+ vector<const char* const*>::const_iterator it = expected_rows.begin();
+ while (iterator->getNext(columns)) {
+ ASSERT_TRUE(it != expected_rows.end());
+ checkRecordRow(columns, (*it)[3], (*it)[2], (*it)[4], (*it)[5], "");
+ ++it;
+ }
+ EXPECT_TRUE(it == expected_rows.end());
+}
+
+TEST_F(SQLite3Update, emptyUpdate) {
+ // If we do nothing between start and commit, the zone content
+ // should be intact.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, flushZone) {
+ // With 'replace' being true startUpdateZone() will flush the existing
+ // zone content.
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, readWhileUpdate) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Until commit is done, the other accessor should see the old data
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ expected_stored);
+
+ // Once the changes are committed, the other accessor will see the new
+ // data.
+ accessor->commitUpdateZone();
+ checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+ empty_stored);
+}
+
+TEST_F(SQLite3Update, rollback) {
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback will revert the change made by startUpdateZone(, true).
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, rollbackFailure) {
+ // This test emulates a rare scenario of making rollback attempt fail.
+ // The iterator is paused in the middle of getting records, which prevents
+ // the rollback operation at the end of the test.
+
+ string columns[DatabaseAccessor::COLUMN_COUNT];
+ iterator = accessor->getRecords("example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(columns));
+
+ accessor->startUpdateZone("example.com.", true);
+ EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitConflict) {
+ // Start reading the DB by another accessor. We should stop at a single
+ // call to getNextRecord() to keep holding the lock.
+ iterator = another_accessor->getRecords("foo.example.com.", zone_id);
+ EXPECT_TRUE(iterator->getNext(get_columns));
+
+ // Due to getNextRecord() above, the other accessor holds a DB lock,
+ // which will prevent commit.
+ zone_id = accessor->startUpdateZone("example.com.", true).second;
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+ EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
+ accessor->rollbackUpdateZone(); // rollback should still succeed
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, updateConflict) {
+ // Similar to the previous case, but this is a conflict with another
+ // update attempt. Note that these two accessors modify disjoint sets
+ // of data; sqlite3 only has a coarse-grained lock so we cannot allow
+ // these updates to run concurrently.
+ EXPECT_TRUE(another_accessor->startUpdateZone("sql1.example.com.",
+ true).first);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+ DataSourceError);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Once we rollback the other attempt of change, we should be able to
+ // start and commit the transaction using the main accessor.
+ another_accessor->rollbackUpdateZone();
+ accessor->startUpdateZone("example.com.", true);
+ accessor->commitUpdateZone();
+}
+
+TEST_F(SQLite3Update, duplicateUpdate) {
+ accessor->startUpdateZone("example.com.", false);
+ EXPECT_THROW(accessor->startUpdateZone("example.com.", false),
+ DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitWithoutTransaction) {
+ EXPECT_THROW(accessor->commitUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, rollbackWithoutTransaction) {
+ EXPECT_THROW(accessor->rollbackUpdateZone(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, addRecord) {
+ // Before update, there should be no record for this name
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ // Commit the change, and confirm the new data is still there.
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, addThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ accessor->addRecordToZone(add_columns);
+
+ expected_stored.clear();
+ expected_stored.push_back(new_data);
+ checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, duplicateAdd) {
+ const char* const dup_data[] = {
+ "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+ "192.0.2.1"
+ };
+ expected_stored.clear();
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Adding exactly the same data. As this backend is "dumb", another
+ // row of the same content will be inserted.
+ copy(dup_data, dup_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+ add_columns);
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ accessor->addRecordToZone(add_columns);
+ expected_stored.push_back(dup_data);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidAdd) {
+ // An attempt of add before an explicit start of transaction
+ EXPECT_THROW(accessor->addRecordToZone(add_columns), DataSourceError);
+}
+
+TEST_F(SQLite3Update, deleteRecord) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Commit the change, and confirm the deleted data still isn't there.
+ accessor->commitUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, deleteThenRollback) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+ // Rollback the change, and confirm the data still exists.
+ accessor->rollbackUpdateZone();
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, deleteNonexistent) {
+ zone_id = accessor->startUpdateZone("example.com.", false).second;
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+
+ // Replace the name with a non existent one, then try to delete it.
+ // nothing should happen.
+ del_params[DatabaseAccessor::DEL_NAME] = "no-such-name.example.com.";
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+ empty_stored);
+
+ // Name exists but the RR type is different. Delete attempt shouldn't
+ // delete only by name.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_TYPE] = "AAAA";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+ // Similar to the previous case, but RDATA is different.
+ copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+ del_params);
+ del_params[DatabaseAccessor::DEL_RDATA] = "192.0.2.2";
+ accessor->deleteRecordInZone(del_params);
+ checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidDelete) {
+ // An attempt of delete before an explicit start of transaction
+ EXPECT_THROW(accessor->deleteRecordInZone(del_params), DataSourceError);
+}
+} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index a11e889..4c9fe42 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -53,6 +53,7 @@ protected:
// NOTE: in addition, the order of the following items matter.
authors_data.push_back("Chen Zhengzhang");
+ authors_data.push_back("Dmitriy Volodin");
authors_data.push_back("Evan Hunt");
authors_data.push_back("Haidong Wang");
authors_data.push_back("Han Feng");
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
new file mode 100644
index 0000000..64ae955
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -0,0 +1,6 @@
+CLEANFILES = *.copied
+BUILT_SOURCES = rwtest.sqlite3.copied
+
+# We use install-sh with the -m option to make sure it's writable
+rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
+ $(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
new file mode 100644
index 0000000..ce95a1d
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/zonetable_unittest.cc b/src/lib/datasrc/tests/zonetable_unittest.cc
index a117176..fa74c0e 100644
--- a/src/lib/datasrc/tests/zonetable_unittest.cc
+++ b/src/lib/datasrc/tests/zonetable_unittest.cc
@@ -18,7 +18,7 @@
#include <dns/rrclass.h>
#include <datasrc/zonetable.h>
-// We use MemoryZone to put something into the table
+// We use InMemoryZone to put something into the table
#include <datasrc/memory_datasrc.h>
#include <gtest/gtest.h>
@@ -28,31 +28,32 @@ using namespace isc::datasrc;
namespace {
TEST(ZoneTest, init) {
- MemoryZone zone(RRClass::IN(), Name("example.com"));
+ InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
EXPECT_EQ(Name("example.com"), zone.getOrigin());
EXPECT_EQ(RRClass::IN(), zone.getClass());
- MemoryZone ch_zone(RRClass::CH(), Name("example"));
+ InMemoryZoneFinder ch_zone(RRClass::CH(), Name("example"));
EXPECT_EQ(Name("example"), ch_zone.getOrigin());
EXPECT_EQ(RRClass::CH(), ch_zone.getClass());
}
TEST(ZoneTest, find) {
- MemoryZone zone(RRClass::IN(), Name("example.com"));
- EXPECT_EQ(Zone::NXDOMAIN,
+ InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
+ EXPECT_EQ(ZoneFinder::NXDOMAIN,
zone.find(Name("www.example.com"), RRType::A()).code);
}
class ZoneTableTest : public ::testing::Test {
protected:
- ZoneTableTest() : zone1(new MemoryZone(RRClass::IN(),
- Name("example.com"))),
- zone2(new MemoryZone(RRClass::IN(),
- Name("example.net"))),
- zone3(new MemoryZone(RRClass::IN(), Name("example")))
+ ZoneTableTest() : zone1(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example.com"))),
+ zone2(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example.net"))),
+ zone3(new InMemoryZoneFinder(RRClass::IN(),
+ Name("example")))
{}
ZoneTable zone_table;
- ZonePtr zone1, zone2, zone3;
+ ZoneFinderPtr zone1, zone2, zone3;
};
TEST_F(ZoneTableTest, addZone) {
@@ -60,7 +61,8 @@ TEST_F(ZoneTableTest, addZone) {
EXPECT_EQ(result::EXIST, zone_table.addZone(zone1));
// names are compared in a case insensitive manner.
EXPECT_EQ(result::EXIST, zone_table.addZone(
- ZonePtr(new MemoryZone(RRClass::IN(), Name("EXAMPLE.COM")))));
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+ Name("EXAMPLE.COM")))));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone2));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone3));
@@ -68,11 +70,11 @@ TEST_F(ZoneTableTest, addZone) {
// Zone table is indexed only by name. Duplicate origin name with
// different zone class isn't allowed.
EXPECT_EQ(result::EXIST, zone_table.addZone(
- ZonePtr(new MemoryZone(RRClass::CH(),
- Name("example.com")))));
+ ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+ Name("example.com")))));
/// Bogus zone (NULL)
- EXPECT_THROW(zone_table.addZone(ZonePtr()), isc::InvalidParameter);
+ EXPECT_THROW(zone_table.addZone(ZoneFinderPtr()), isc::InvalidParameter);
}
TEST_F(ZoneTableTest, DISABLED_removeZone) {
@@ -95,7 +97,7 @@ TEST_F(ZoneTableTest, findZone) {
EXPECT_EQ(result::NOTFOUND,
zone_table.findZone(Name("example.org")).code);
- EXPECT_EQ(ConstZonePtr(),
+ EXPECT_EQ(ConstZoneFinderPtr(),
zone_table.findZone(Name("example.org")).zone);
// there's no exact match. the result should be the longest match,
@@ -107,7 +109,7 @@ TEST_F(ZoneTableTest, findZone) {
// make sure the partial match is indeed the longest match by adding
// a zone with a shorter origin and query again.
- ZonePtr zone_com(new MemoryZone(RRClass::IN(), Name("com")));
+ ZoneFinderPtr zone_com(new InMemoryZoneFinder(RRClass::IN(), Name("com")));
EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone_com));
EXPECT_EQ(Name("example.com"),
zone_table.findZone(Name("www.example.com")).zone->getOrigin());
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 1252c94..c83b14b 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -15,59 +15,89 @@
#ifndef __ZONE_H
#define __ZONE_H 1
-#include <datasrc/result.h>
+#include <dns/rrset.h>
#include <dns/rrsetlist.h>
+#include <datasrc/result.h>
+
namespace isc {
namespace datasrc {
-/// \brief The base class for a single authoritative zone
-///
-/// The \c Zone class is an abstract base class for representing
-/// a DNS zone as part of data source.
+/// \brief The base class to search a zone for RRsets
///
-/// At the moment this is provided mainly for making the \c ZoneTable class
-/// and the authoritative query logic testable, and only provides a minimal
-/// set of features.
-/// This is why this class is defined in the same header file, but it may
-/// have to move to a separate header file when we understand what is
-/// necessary for this class for actual operation.
+/// The \c ZoneFinder class is an abstract base class for representing
+/// an object that performs DNS lookups in a specific zone accessible via
+/// a data source. In general, different types of data sources (in-memory,
+/// database-based, etc) define their own derived classes of \c ZoneFinder,
+/// implementing ways to retrieve the required data through the common
+/// interfaces declared in the base class. Each concrete \c ZoneFinder
+/// object is therefore (conceptually) associated with a specific zone
+/// of one specific data source instance.
///
-/// The idea is to provide a specific derived zone class for each data
-/// source, beginning with in memory one. At that point the derived classes
-/// will have more specific features. For example, they will maintain
-/// information about the location of a zone file, whether it's loaded in
-/// memory, etc.
+/// The origin name and the RR class of the associated zone are available
+/// via the \c getOrigin() and \c getClass() methods, respectively.
///
-/// It's not yet clear how the derived zone classes work with various other
-/// data sources when we integrate these components, but one possibility is
-/// something like this:
-/// - If the underlying database such as some variant of SQL doesn't have an
-/// explicit representation of zones (as part of public interface), we can
-/// probably use a "default" zone class that simply encapsulates the
-/// corresponding data source and calls a common "find" like method.
-/// - Some data source may want to specialize it by inheritance as an
-/// optimization. For example, in the current schema design of the sqlite3
-/// data source, its (derived) zone class would contain the information of
-/// the "zone ID".
+/// The most important method of this class is \c find(), which performs
+/// the lookup for a given domain and type. See the description of the
+/// method for details.
///
-/// <b>Note:</b> Unlike some other abstract base classes we don't name the
-/// class beginning with "Abstract". This is because we want to have
-/// commonly used definitions such as \c Result and \c ZonePtr, and we want
-/// to make them look more intuitive.
-class Zone {
+/// \note It's not clear whether we should request that a zone finder form a
+/// "transaction", that is, whether to ensure the finder is not susceptible
+/// to changes made by someone else than the creator of the finder. If we
+/// don't request that, for example, two different lookup results for the
+/// same name and type can be different if other threads or programs make
+/// updates to the zone between the lookups. We should revisit this point
+/// as we gain more experiences.
+class ZoneFinder {
public:
/// Result codes of the \c find() method.
///
/// Note: the codes are tentative. We may need more, or we may find
/// some of them unnecessary as we implement more details.
+ ///
+ /// Some are synonyms of others in terms of RCODE returned to user.
+ /// But they help the logic to decide if it should ask for a NSEC
+ /// that covers something or not (for example, in case of NXRRSET,
+ /// the directly returned NSEC is sufficient, but with wildcard one,
+ /// we need to add one proving there's no exact match and this is
+ /// actually the best wildcard we have). Data sources that don't
+ /// support DNSSEC don't need to distinguish them.
+ ///
+ /// In case of NXRRSET related results, the returned NSEC record
+ /// belongs to the domain which would provide the result if it
+ /// contained the correct type (in case of NXRRSET, it is the queried
+ /// domain, in case of WILDCARD_NXRRSET, it is the wildcard domain
+ /// that matched the query name). In case of an empty nonterminal,
+ /// an NSEC is provided for the interval where the empty nonterminal
+ /// lives. The end of the interval is the subdomain causing existence
+ /// of the empty nonterminal (if there's sub.x.example.com, and no record
+ /// in x.example.com, then x.example.com exists implicitly - is the empty
+ /// nonterminal and sub.x.example.com is the subdomain causing it).
+ ///
+ /// Examples: if zone "example.com" has the following record:
+ /// \code
+ /// a.b.example.com. NSEC c.example.com.
+ /// \endcode
+ /// a call to \c find() for "b.example.com." will result in NXRRSET,
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ /// Likewise, if zone "example.org" has the following record,
+ /// \code
+ /// x.*.example.org. NSEC a.example.org.
+ /// \endcode
+ /// a call to \c find() for "y.example.org" will result in
+ /// WILDCARD_NXRRSET (*.example.org is an empty nonterminal wildcard node),
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ ///
+ /// In case of NXDOMAIN, the returned NSEC covers the queried domain.
enum Result {
SUCCESS, ///< An exact match is found.
DELEGATION, ///< The search encounters a zone cut.
NXDOMAIN, ///< There is no domain name that matches the search name
NXRRSET, ///< There is a matching name but no RRset of the search type
CNAME, ///< The search encounters and returns a CNAME RR
- DNAME ///< The search encounters and returns a DNAME RR
+ DNAME, ///< The search encounters and returns a DNAME RR
+ WILDCARD, ///< Success by wildcard match, for DNSSEC
+ WILDCARD_NXRRSET ///< NXRRSET on wildcard, for DNSSEC
};
/// A helper structure to represent the search result of \c find().
@@ -107,7 +137,11 @@ public:
/// performed on these values to express compound options.
enum FindOptions {
FIND_DEFAULT = 0, ///< The default options
- FIND_GLUE_OK = 1 ///< Allow search under a zone cut
+ FIND_GLUE_OK = 1, ///< Allow search under a zone cut
+ FIND_DNSSEC = 2 ///< Require DNSSEC data in the answer
+ ///< (RRSIG, NSEC, etc.). The implementation
+ ///< is allowed to include it even if it is
+ ///< not set.
};
///
@@ -119,10 +153,10 @@ protected:
///
/// This is intentionally defined as \c protected as this base class should
/// never be instantiated (except as part of a derived class).
- Zone() {}
+ ZoneFinder() {}
public:
/// The destructor.
- virtual ~Zone() {}
+ virtual ~ZoneFinder() {}
//@}
///
@@ -131,14 +165,14 @@ public:
/// These methods should never throw an exception.
//@{
/// Return the origin name of the zone.
- virtual const isc::dns::Name& getOrigin() const = 0;
+ virtual isc::dns::Name getOrigin() const = 0;
/// Return the RR class of the zone.
- virtual const isc::dns::RRClass& getClass() const = 0;
+ virtual isc::dns::RRClass getClass() const = 0;
//@}
///
- /// \name Search Method
+ /// \name Search Methods
///
//@{
/// Search the zone for a given pair of domain name and RR type.
@@ -170,8 +204,8 @@ public:
/// We should revisit the interface before we heavily rely on it.
///
/// The \c options parameter specifies customized behavior of the search.
- /// Their semantics is as follows:
- /// - \c GLUE_OK Allow search under a zone cut. By default the search
+ /// Their semantics is as follows (they can be OR'ed together as bit flags):
+ /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
/// will stop once it encounters a zone cut. If this option is specified
/// it remembers information about the highest zone cut and continues
/// the search until it finds an exact match for the given name or it
@@ -179,6 +213,9 @@ public:
/// RRsets for that name are searched just like the normal case;
/// otherwise, if the search has encountered a zone cut, \c DELEGATION
/// with the information of the highest zone cut will be returned.
+ /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+ /// returned with the answer. It is allowed for the data source to
+ /// include them even when not requested.
///
/// A derived version of this method may involve internal resource
/// allocation, especially for constructing the resulting RRset, and may
@@ -197,18 +234,273 @@ public:
const isc::dns::RRType& type,
isc::dns::RRsetList* target = NULL,
const FindOptions options
- = FIND_DEFAULT) const = 0;
+ = FIND_DEFAULT) = 0;
+
+ /// \brief Get previous name in the zone
+ ///
+ /// Gets the previous name in the DNSSEC order. This can be used
+ /// to find the correct NSEC records for proving nonexistence
+ /// of domains.
+ ///
+ /// The concrete implementation might throw anything it thinks appropriate,
+ /// however it is recommended to stick to the ones listed here. The user
+ /// of this method should be able to handle any exceptions.
+ ///
+ /// This method does not include under-zone-cut data (glue data).
+ ///
+ /// \param query The name for which one we look for a previous one. The
+ /// queried name doesn't have to exist in the zone.
+ /// \return The preceding name
+ ///
+ /// \throw NotImplemented in case the data source backend doesn't support
+ /// DNSSEC or there is no previous in the zone (NSEC records might be
+ /// missing in the DB, the queried name is less or equal to the apex).
+ /// \throw DataSourceError for low-level or internal datasource errors
+ /// (like broken connection to database, wrong data living there).
+ /// \throw std::bad_alloc For allocation errors.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const = 0;
//@}
};
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<Zone> ZonePtr;
+/// \brief Operator to combine FindOptions
+///
+/// We would need to manually static-cast the options if we put or
+/// between them, which is undesired with bit-flag options. Therefore
+/// we hide the cast here, which is the simplest solution and it still
+/// provides reasonable level of type safety.
+inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
+ ZoneFinder::FindOptions b)
+{
+ return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
+ static_cast<unsigned>(b)));
+}
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<const Zone> ConstZonePtr;
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
-}
-}
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
+
+/// The base class to make updates to a single zone.
+///
+/// On construction, each derived class object will start a "transaction"
+/// for making updates to a specific zone (this means a constructor of
+/// a derived class would normally take parameters to identify the zone
+/// to be updated). The underlying realization of a "transaction" will differ
+/// for different derived classes; if it uses a general purpose database
+/// as a backend, it will involve performing some form of "begin transaction"
+/// statement for the database.
+///
+/// Updates (adding or deleting RRs) are made via \c addRRset() and
+/// \c deleteRRset() methods. Until the \c commit() method is called the
+/// changes are local to the updater object. For example, they won't be
+/// visible via a \c ZoneFinder object except the one returned by the
+/// updater's own \c getFinder() method. The \c commit() completes the
+/// transaction and makes the changes visible to others.
+///
+/// This class does not provide an explicit "rollback" interface. If
+/// something wrong or unexpected happens during the updates and the
+/// caller wants to cancel the intermediate updates, the caller should
+/// simply destruct the updater object without calling \c commit().
+/// The destructor is supposed to perform the "rollback" operation,
+/// depending on the internal details of the derived class.
+///
+/// \note This initial implementation provides a quite simple interface of
+/// adding and deleting RRs (see the description of the related methods).
+/// It may be revisited as we gain more experiences.
+class ZoneUpdater {
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as protected to ensure that this base
+ /// class is never instantiated directly.
+ ZoneUpdater() {}
+
+public:
+ /// The destructor
+ ///
+ /// Each derived class implementation must ensure that if \c commit()
+ /// has not been performed by the time of the call to it, then it
+ /// "rollbacks" the updates made via the updater so far.
+ virtual ~ZoneUpdater() {}
+
+ /// Return a finder for the zone being updated.
+ ///
+ /// The returned finder provides the functionalities of \c ZoneFinder
+ /// for the zone as updates are made via the updater. That is, before
+ /// making any update, the finder will be able to find all RRsets that
+ /// exist in the zone at the time the updater is created. If RRsets
+ /// are added or deleted via \c addRRset() or \c deleteRRset(),
+ /// this finder will find the added ones or miss the deleted ones
+ /// respectively.
+ ///
+ /// The finder returned by this method is effective only while the updates
+ /// are performed, i.e., from the construction of the corresponding
+ /// updater until \c commit() is performed or the updater is destructed
+ /// without commit. The result of a subsequent call to this method (or
+ /// the use of the result) after that is undefined.
+ ///
+ /// \return A reference to a \c ZoneFinder for the updated zone
+ virtual ZoneFinder& getFinder() = 0;
+
+ /// Add an RRset to a zone via the updater
+ ///
+ /// This may be revisited in a future version, but right now the intended
+ /// behavior of this method is simple: It "naively" adds the specified
+ /// RRset to the zone specified on creation of the updater.
+ /// It performs minimum level of validation on the specified RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// and otherwise does not check any oddity. For example, it doesn't
+ /// check whether the owner name of the specified RRset is a subdomain
+ /// of the zone's origin; it doesn't care whether or not there is already
+ /// an RRset of the same name and RR type in the zone, and if there is,
+ /// whether any of the existing RRs have duplicate RDATA with the added
+ /// ones. If these conditions matter the calling application must examine
+ /// the existing data beforehand using the \c ZoneFinder returned by
+ /// \c getFinder().
+ ///
+ /// The validation requirement on the associated RRSIG is temporary.
+ /// If we find it more reasonable and useful to allow adding a pair of
+ /// RRset and its RRSIG RRset as we gain experiences with the interface,
+ /// we may remove this restriction. Until then we explicitly check it
+ /// to prevent accidental misuse.
+ ///
+ /// Conceptually, on successful call to this method, the zone will have
+ /// the specified RRset, and if there is already an RRset of the same
+ /// name and RR type, these two sets will be "merged". "Merged" means
+ /// that a subsequent call to \c ZoneFinder::find() for the name and type
+ /// will result in success and the returned RRset will contain all
+ /// previously existing and newly added RDATAs with the TTL being the
+ /// minimum of the two RRsets. The underlying representation of the
+ /// "merged" RRsets may vary depending on the characteristic of the
+ /// underlying data source. For example, if it uses a general purpose
+ /// database that stores each RR of the same RRset separately, it may
+ /// simply be a larger sets of RRs based on both the existing and added
+ /// RRsets; the TTLs of the RRs may be different within the database, and
+ /// there may even be duplicate RRs in different database rows. As long
+ /// as the RRset returned via \c ZoneFinder::find() conforms to the
+ /// concept of "merge", the actual internal representation is up to the
+ /// implementation.
+ ///
+ /// This method must not be called once commit() is performed. If it
+ /// calls after \c commit() the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experiences:
+ ///
+ /// - we may want to check (and maybe reject) if there is already a
+ /// duplicate RR (that has the same RDATA).
+ /// - we may want to check (and maybe reject) if there is already an
+ /// RRset of the same name and RR type with different TTL
+ /// - we may even want to check if there is already any RRset of the
+ /// same name and RR type.
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there's a duplicate, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be added
+ virtual void addRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Delete an RRset from a zone via the updater
+ ///
+ /// Like \c addRRset(), the detailed semantics and behavior of this method
+ /// may have to be revisited in a future version. The following are
+ /// based on the initial implementation decisions.
+ ///
+ /// On successful completion of this method, it will remove from the zone
+ /// the RRs of the specified owner name and RR type that match one of
+ /// the RDATAs of the specified RRset. There are several points to be
+ /// noted:
+ /// - Existing RRs that don't match any of the specified RDATAs will
+ /// remain in the zone.
+ /// - Any RRs of the specified RRset that doesn't exist in the zone will
+ /// simply be ignored; the implementation of this method is not supposed
+ /// to check that condition.
+ /// - The TTL of the RRset is ignored; matching is only performed by
+ /// the owner name, RR type and RDATA
+ ///
+ /// Ignoring the TTL may not look sensible, but it's based on the
+ /// observation that it will result in more intuitive result, especially
+ /// when the underlying data source is a general purpose database.
+ /// See also \c DatabaseAccessor::deleteRecordInZone() on this point.
+ /// It also matches the dynamic update protocol (RFC2136), where TTLs
+ /// are ignored when deleting RRs.
+ ///
+ /// \note Since the TTL is ignored, this method could take the RRset
+ /// to be deleted as a tuple of name, RR type, and a list of RDATAs.
+ /// But in practice, it's quite likely that the caller has the RRset
+ /// in the form of the \c RRset object (e.g., extracted from a dynamic
+ /// update request message), so this interface would rather be more
+ /// convenient. If it turns out not to be true we can change or extend
+ /// the method signature.
+ ///
+ /// This method performs minimum level of validation on the specified
+ /// RRset:
+ /// - Whether the RR class is identical to that for the zone to be updated
+ /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+ /// - Whether the RRset is not associated with an RRSIG, i.e.,
+ /// whether \c getRRsig() on the RRset returns a NULL pointer.
+ ///
+ /// This method must not be called once commit() is performed. If it
+ /// calls after \c commit() the implementation must throw a
+ /// \c DataSourceError exception.
+ ///
+ /// \todo As noted above we may have to revisit the design details as we
+ /// gain experiences:
+ ///
+ /// - we may want to check (and maybe reject) if some or all of the RRs
+ /// for the specified RRset don't exist in the zone
+ /// - we may want to allow an option to "delete everything" for specified
+ /// name and/or specified name + RR type.
+ /// - as mentioned above, we may want to include the TTL in matching the
+ /// deleted RRs
+ /// - we may want to add an "options" parameter that can control the
+ /// above points
+ /// - we may want to have this method return a value containing the
+ /// information on whether there's any RRs that are specified but don't
+ /// exit, the number of actually deleted RRs, etc.
+ ///
+ /// \exception DataSourceError Called after \c commit(), RRset is invalid
+ /// (see above), internal data source error
+ /// \exception std::bad_alloc Resource allocation failure
+ ///
+ /// \param rrset The RRset to be deleted
+ virtual void deleteRRset(const isc::dns::RRset& rrset) = 0;
+
+ /// Commit the updates made in the updater to the zone
+ ///
+ /// This method completes the "transaction" started at the creation
+ /// of the updater. After successful completion of this method, the
+ /// updates will be visible outside the scope of the updater.
+ /// The actual internal behavior will differ for different derived classes.
+ /// For a derived class with a general purpose database as a backend,
+ /// for example, this method would perform a "commit" statement for the
+ /// database.
+ ///
+ /// This operation can only be performed at most once. A duplicate call
+ /// must result in a DataSourceError exception.
+ ///
+ /// \exception DataSourceError Duplicate call of the method,
+ /// internal data source error
+ virtual void commit() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
+typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+
+} // end of datasrc
+} // end of isc
#endif // __ZONE_H
diff --git a/src/lib/datasrc/zonetable.cc b/src/lib/datasrc/zonetable.cc
index bc09286..644861c 100644
--- a/src/lib/datasrc/zonetable.cc
+++ b/src/lib/datasrc/zonetable.cc
@@ -28,8 +28,8 @@ namespace datasrc {
/// \short Private data and implementation of ZoneTable
struct ZoneTable::ZoneTableImpl {
// Type aliases to make it shorter
- typedef RBTree<Zone> ZoneTree;
- typedef RBNode<Zone> ZoneNode;
+ typedef RBTree<ZoneFinder> ZoneTree;
+ typedef RBNode<ZoneFinder> ZoneNode;
// The actual storage
ZoneTree zones_;
@@ -40,7 +40,7 @@ struct ZoneTable::ZoneTableImpl {
*/
// Implementation of ZoneTable::addZone
- result::Result addZone(ZonePtr zone) {
+ result::Result addZone(ZoneFinderPtr zone) {
// Sanity check
if (!zone) {
isc_throw(InvalidParameter,
@@ -85,12 +85,12 @@ struct ZoneTable::ZoneTableImpl {
break;
// We have no data there, so translate the pointer to NULL as well
case ZoneTree::NOTFOUND:
- return (FindResult(result::NOTFOUND, ZonePtr()));
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
// Can Not Happen
default:
assert(0);
// Because of warning
- return (FindResult(result::NOTFOUND, ZonePtr()));
+ return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
}
// Can Not Happen (remember, NOTFOUND is handled)
@@ -108,7 +108,7 @@ ZoneTable::~ZoneTable() {
}
result::Result
-ZoneTable::addZone(ZonePtr zone) {
+ZoneTable::addZone(ZoneFinderPtr zone) {
return (impl_->addZone(zone));
}
diff --git a/src/lib/datasrc/zonetable.h b/src/lib/datasrc/zonetable.h
index 5b873d1..5a34480 100644
--- a/src/lib/datasrc/zonetable.h
+++ b/src/lib/datasrc/zonetable.h
@@ -41,11 +41,11 @@ namespace datasrc {
class ZoneTable {
public:
struct FindResult {
- FindResult(result::Result param_code, const ZonePtr param_zone) :
+ FindResult(result::Result param_code, const ZoneFinderPtr param_zone) :
code(param_code), zone(param_zone)
{}
const result::Result code;
- const ZonePtr zone;
+ const ZoneFinderPtr zone;
};
///
/// \name Constructors and Destructor.
@@ -83,7 +83,7 @@ public:
/// added to the zone table.
/// \return \c result::EXIST The zone table already contains
/// zone of the same origin.
- result::Result addZone(ZonePtr zone);
+ result::Result addZone(ZoneFinderPtr zone);
/// Remove a \c Zone of the given origin name from the \c ZoneTable.
///
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 887ac09..0d2bffd 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -23,14 +23,22 @@ EXTRA_DIST += rdata/generic/cname_5.cc
EXTRA_DIST += rdata/generic/cname_5.h
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.cc
EXTRA_DIST += rdata/generic/detail/nsec_bitmap.h
+EXTRA_DIST += rdata/generic/detail/txt_like.h
+EXTRA_DIST += rdata/generic/detail/ds_like.h
+EXTRA_DIST += rdata/generic/dlv_32769.cc
+EXTRA_DIST += rdata/generic/dlv_32769.h
EXTRA_DIST += rdata/generic/dname_39.cc
EXTRA_DIST += rdata/generic/dname_39.h
EXTRA_DIST += rdata/generic/dnskey_48.cc
EXTRA_DIST += rdata/generic/dnskey_48.h
EXTRA_DIST += rdata/generic/ds_43.cc
EXTRA_DIST += rdata/generic/ds_43.h
+EXTRA_DIST += rdata/generic/hinfo_13.cc
+EXTRA_DIST += rdata/generic/hinfo_13.h
EXTRA_DIST += rdata/generic/mx_15.cc
EXTRA_DIST += rdata/generic/mx_15.h
+EXTRA_DIST += rdata/generic/naptr_35.cc
+EXTRA_DIST += rdata/generic/naptr_35.h
EXTRA_DIST += rdata/generic/ns_2.cc
EXTRA_DIST += rdata/generic/ns_2.h
EXTRA_DIST += rdata/generic/nsec3_50.cc
@@ -49,14 +57,24 @@ EXTRA_DIST += rdata/generic/rrsig_46.cc
EXTRA_DIST += rdata/generic/rrsig_46.h
EXTRA_DIST += rdata/generic/soa_6.cc
EXTRA_DIST += rdata/generic/soa_6.h
+EXTRA_DIST += rdata/generic/spf_99.cc
+EXTRA_DIST += rdata/generic/spf_99.h
EXTRA_DIST += rdata/generic/txt_16.cc
EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/minfo_14.cc
+EXTRA_DIST += rdata/generic/minfo_14.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
EXTRA_DIST += rdata/hs_4/a_1.cc
EXTRA_DIST += rdata/hs_4/a_1.h
EXTRA_DIST += rdata/in_1/a_1.cc
EXTRA_DIST += rdata/in_1/a_1.h
EXTRA_DIST += rdata/in_1/aaaa_28.cc
EXTRA_DIST += rdata/in_1/aaaa_28.h
+EXTRA_DIST += rdata/in_1/dhcid_49.cc
+EXTRA_DIST += rdata/in_1/dhcid_49.h
+EXTRA_DIST += rdata/in_1/srv_33.cc
+EXTRA_DIST += rdata/in_1/srv_33.h
#EXTRA_DIST += rdata/template.cc
#EXTRA_DIST += rdata/template.h
@@ -88,8 +106,11 @@ libdns___la_SOURCES += tsig.h tsig.cc
libdns___la_SOURCES += tsigerror.h tsigerror.cc
libdns___la_SOURCES += tsigkey.h tsigkey.cc
libdns___la_SOURCES += tsigrecord.h tsigrecord.cc
+libdns___la_SOURCES += character_string.h character_string.cc
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.h
libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.cc
+libdns___la_SOURCES += rdata/generic/detail/txt_like.h
+libdns___la_SOURCES += rdata/generic/detail/ds_like.h
libdns___la_CPPFLAGS = $(AM_CPPFLAGS)
# Most applications of libdns++ will only implicitly rely on libcryptolink,
diff --git a/src/lib/dns/benchmarks/Makefile.am b/src/lib/dns/benchmarks/Makefile.am
index 8645385..0d7856f 100644
--- a/src/lib/dns/benchmarks/Makefile.am
+++ b/src/lib/dns/benchmarks/Makefile.am
@@ -13,5 +13,6 @@ noinst_PROGRAMS = rdatarender_bench
rdatarender_bench_SOURCES = rdatarender_bench.cc
rdatarender_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+rdatarender_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
rdatarender_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
rdatarender_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/lib/dns/character_string.cc b/src/lib/dns/character_string.cc
new file mode 100644
index 0000000..3a289ac
--- /dev/null
+++ b/src/lib/dns/character_string.cc
@@ -0,0 +1,140 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "character_string.h"
+#include "rdata.h"
+
+using namespace std;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace dns {
+
+namespace {
+bool isDigit(char c) {
+ return (('0' <= c) && (c <= '9'));
+}
+}
+
+std::string
+characterstr::getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ string result;
+
+ // If the input string only contains white-spaces, it is an invalid
+ // <character-string>
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText, "Invalid text format, \
+ <character-string> field is missing.");
+ }
+
+ // Whether the <character-string> is separated with double quotes (")
+ bool quotes_separated = (*input_iterator == '"');
+ // Whether the quotes are paired if the string is quotes separated
+ bool quotes_paired = false;
+
+ if (quotes_separated) {
+ ++input_iterator;
+ }
+
+ while(input_iterator < input_str.end()){
+ // Escaped characters processing
+ if (*input_iterator == '\\') {
+ if (input_iterator + 1 == input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ if (isDigit(*(input_iterator + 1))) {
+ // \DDD where each D is a digit. It is the octet
+ // corresponding to the decimal number described by DDD
+ if (input_iterator + 3 >= input_str.end()) {
+ isc_throw(InvalidRdataText, "<character-string> ended \
+ prematurely.");
+ } else {
+ int n = 0;
+ ++input_iterator;
+ for (int i = 0; i < 3; ++i) {
+ if (isDigit(*input_iterator)) {
+ n = n*10 + (*input_iterator - '0');
+ ++input_iterator;
+ } else {
+ isc_throw(InvalidRdataText, "Illegal decimal \
+ escaping series");
+ }
+ }
+ if (n > 255) {
+ isc_throw(InvalidRdataText, "Illegal octet \
+ number");
+ }
+ result.push_back(n);
+ continue;
+ }
+ } else {
+ ++input_iterator;
+ result.push_back(*input_iterator);
+ ++input_iterator;
+ continue;
+ }
+ }
+ }
+
+ if (quotes_separated) {
+ // If the <character-string> is separated with quotes symbol and
+ // another quotes symbol is encountered, it is the end of the
+ // <character-string>
+ if (*input_iterator == '"') {
+ quotes_paired = true;
+ ++input_iterator;
+ // Reach the end of character string
+ break;
+ }
+ } else if (*input_iterator == ' ') {
+ // If the <character-string> is not separated with quotes symbol,
+ // it is separated with <space> char
+ break;
+ }
+
+ result.push_back(*input_iterator);
+
+ ++input_iterator;
+ }
+
+ if (result.size() > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, "<character-string> is too long");
+ }
+
+ if (quotes_separated && !quotes_paired) {
+ isc_throw(InvalidRdataText, "The quotes are not paired");
+ }
+
+ return (result);
+}
+
+std::string
+characterstr::getNextCharacterString(util::InputBuffer& buffer, size_t len) {
+ uint8_t str_len = buffer.readUint8();
+
+ size_t pos = buffer.getPosition();
+ if (len - pos < str_len) {
+ isc_throw(InvalidRdataLength, "Invalid string length");
+ }
+
+ uint8_t buf[MAX_CHARSTRING_LEN];
+ buffer.readData(buf, str_len);
+ return (string(buf, buf + str_len));
+}
+
+} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/dns/character_string.h b/src/lib/dns/character_string.h
new file mode 100644
index 0000000..7961274
--- /dev/null
+++ b/src/lib/dns/character_string.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CHARACTER_STRING_H
+#define __CHARACTER_STRING_H
+
+#include <string>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dns {
+
+// \brief Some utility functions to extract <character-string> from string
+// or InputBuffer
+//
+// <character-string> is expressed in one or two ways: as a contiguous set
+// of characters without interior spaces, or as a string beginning with a "
+// and ending with a ". Inside a " delimited string any character can
+// occur, except for a " itself, which must be quoted using \ (back slash).
+// Ref. RFC1035
+
+
+namespace characterstr {
+ /// Get a <character-string> from a string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator The iterator from which to start extracting,
+ /// the iterator will be updated to new position after the function
+ /// is returned
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Get a <character-string> from a input buffer
+ ///
+ /// \param buffer The input buffer
+ /// \param len The input buffer total length
+ /// \return A std::string that contains the extracted <character-string>
+ std::string getNextCharacterString(util::InputBuffer& buffer, size_t len);
+
+} // namespace characterstr
+} // namespace dns
+} // namespace isc
+
+#endif // __CHARACTER_STRING_H
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index b3c8da2..f3cd5df 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -133,7 +133,15 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
if classdir_mtime < getmtime('@srcdir@/rdata'):
classdir_mtime = getmtime('@srcdir@/rdata')
- for dir in list(os.listdir('@srcdir@/rdata')):
+ # Sort directories before iterating through them so that the directory
+ # list is processed in the same order on all systems. The resulting
+ # files should compile regardless of the order in which the components
+ # are included but... Having a fixed order for the directories should
+ # eliminate system-dependent problems. (Note that the directory names
+ # in BIND 10 are ASCII, so the order should be locale-independent.)
+ dirlist = os.listdir('@srcdir@/rdata')
+ dirlist.sort()
+ for dir in dirlist:
classdir = '@srcdir@/rdata' + os.sep + dir
m = re_typecode.match(dir)
if os.path.isdir(classdir) and (m != None or dir == 'generic'):
@@ -145,7 +153,12 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
class_code = m.group(2)
if not class_code in classcode2txt:
classcode2txt[class_code] = class_txt
- for file in list(os.listdir(classdir)):
+
+ # Same considerations as directories regarding sorted order
+ # also apply to files.
+ filelist = os.listdir(classdir)
+ filelist.sort()
+ for file in filelist:
file = classdir + os.sep + file
m = re_typecode.match(os.path.split(file)[1])
if m != None:
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index bf7ccd5..b3e9229 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -124,10 +124,12 @@ public:
void setOpcode(const Opcode& opcode);
void setRcode(const Rcode& rcode);
int parseQuestion(InputBuffer& buffer);
- int parseSection(const Message::Section section, InputBuffer& buffer);
+ int parseSection(const Message::Section section, InputBuffer& buffer,
+ Message::ParseOptions options);
void addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata);
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options);
void addEDNS(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
const RRTTL& ttl, const Rdata& rdata);
@@ -239,7 +241,28 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
"Message rendering attempted without Opcode set");
}
+ // Reserve the space for TSIG (if needed) so that we can handle truncation
+ // case correctly later when that happens. orig_xxx variables remember
+ // some configured parameters of renderer in case they are needed in
+ // truncation processing below.
+ const size_t tsig_len = (tsig_ctx != NULL) ? tsig_ctx->getTSIGLength() : 0;
+ const size_t orig_msg_len_limit = renderer.getLengthLimit();
+ const AbstractMessageRenderer::CompressMode orig_compress_mode =
+ renderer.getCompressMode();
+ if (tsig_len > 0) {
+ if (tsig_len > orig_msg_len_limit) {
+ isc_throw(InvalidParameter, "Failed to render DNS message: "
+ "too small limit for a TSIG (" <<
+ orig_msg_len_limit << ")");
+ }
+ renderer.setLengthLimit(orig_msg_len_limit - tsig_len);
+ }
+
// reserve room for the header
+ if (renderer.getLengthLimit() < HEADERLEN) {
+ isc_throw(InvalidParameter, "Failed to render DNS message: "
+ "too small limit for a Header");
+ }
renderer.skip(HEADERLEN);
uint16_t qdcount =
@@ -284,6 +307,22 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
}
}
+ // If we're adding a TSIG to a truncated message, clear all RRsets
+ // from the message except for the question before adding the TSIG.
+ // If even (some of) the question doesn't fit, don't include it.
+ if (tsig_ctx != NULL && renderer.isTruncated()) {
+ renderer.clear();
+ renderer.setLengthLimit(orig_msg_len_limit - tsig_len);
+ renderer.setCompressMode(orig_compress_mode);
+ renderer.skip(HEADERLEN);
+ qdcount = for_each(questions_.begin(), questions_.end(),
+ RenderSection<QuestionPtr>(renderer,
+ false)).getTotalCount();
+ ancount = 0;
+ nscount = 0;
+ arcount = 0;
+ }
+
// Adjust the counter buffer.
// XXX: these may not be equal to the number of corresponding entries
// in rrsets_[] or questions_ if truncation occurred or an EDNS OPT RR
@@ -315,10 +354,16 @@ MessageImpl::toWire(AbstractMessageRenderer& renderer, TSIGContext* tsig_ctx) {
renderer.writeUint16At(arcount, header_pos);
// Add TSIG, if necessary, at the end of the message.
- // TODO: truncate case consideration
if (tsig_ctx != NULL) {
- tsig_ctx->sign(qid_, renderer.getData(),
- renderer.getLength())->toWire(renderer);
+ // Release the reserved space in the renderer.
+ renderer.setLengthLimit(orig_msg_len_limit);
+
+ const int tsig_count =
+ tsig_ctx->sign(qid_, renderer.getData(),
+ renderer.getLength())->toWire(renderer);
+ if (tsig_count != 1) {
+ isc_throw(Unexpected, "Failed to render a TSIG RR");
+ }
// update the ARCOUNT for the TSIG RR. Note that for a sane DNS
// message arcount should never overflow to 0.
@@ -571,7 +616,7 @@ Message::parseHeader(InputBuffer& buffer) {
}
void
-Message::fromWire(InputBuffer& buffer) {
+Message::fromWire(InputBuffer& buffer, ParseOptions options) {
if (impl_->mode_ != Message::PARSE) {
isc_throw(InvalidMessageOperation,
"Message parse attempted in non parse mode");
@@ -583,11 +628,11 @@ Message::fromWire(InputBuffer& buffer) {
impl_->counts_[SECTION_QUESTION] = impl_->parseQuestion(buffer);
impl_->counts_[SECTION_ANSWER] =
- impl_->parseSection(SECTION_ANSWER, buffer);
+ impl_->parseSection(SECTION_ANSWER, buffer, options);
impl_->counts_[SECTION_AUTHORITY] =
- impl_->parseSection(SECTION_AUTHORITY, buffer);
+ impl_->parseSection(SECTION_AUTHORITY, buffer, options);
impl_->counts_[SECTION_ADDITIONAL] =
- impl_->parseSection(SECTION_ADDITIONAL, buffer);
+ impl_->parseSection(SECTION_ADDITIONAL, buffer, options);
}
int
@@ -663,7 +708,7 @@ struct MatchRR : public unary_function<RRsetPtr, bool> {
// is hardcoded here.
int
MessageImpl::parseSection(const Message::Section section,
- InputBuffer& buffer)
+ InputBuffer& buffer, Message::ParseOptions options)
{
assert(section < MessageImpl::NUM_SECTIONS);
@@ -695,7 +740,7 @@ MessageImpl::parseSection(const Message::Section section,
addTSIG(section, count, buffer, start_position, name, rrclass, ttl,
*rdata);
} else {
- addRR(section, name, rrclass, rrtype, ttl, rdata);
+ addRR(section, name, rrclass, rrtype, ttl, rdata, options);
++added;
}
}
@@ -706,19 +751,22 @@ MessageImpl::parseSection(const Message::Section section,
void
MessageImpl::addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata)
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options)
{
- vector<RRsetPtr>::iterator it =
- find_if(rrsets_[section].begin(), rrsets_[section].end(),
- MatchRR(name, rrtype, rrclass));
- if (it != rrsets_[section].end()) {
- (*it)->setTTL(min((*it)->getTTL(), ttl));
- (*it)->addRdata(rdata);
- } else {
- RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
- rrset->addRdata(rdata);
- rrsets_[section].push_back(rrset);
+ if ((options & Message::PRESERVE_ORDER) == 0) {
+ vector<RRsetPtr>::iterator it =
+ find_if(rrsets_[section].begin(), rrsets_[section].end(),
+ MatchRR(name, rrtype, rrclass));
+ if (it != rrsets_[section].end()) {
+ (*it)->setTTL(min((*it)->getTTL(), ttl));
+ (*it)->addRdata(rdata);
+ return;
+ }
}
+ RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
+ rrset->addRdata(rdata);
+ rrsets_[section].push_back(rrset);
}
void
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index fcc53e9..f286c67 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -565,16 +565,74 @@ public:
/// \c tsig_ctx will be updated based on the fact it was used for signing
/// and with the latest MAC.
///
+ /// \exception InvalidMessageOperation The message is not in the Render
+ /// mode, or either Rcode or Opcode is not set.
+ /// \exception InvalidParameter The allowable limit of \c renderer is too
+ /// small for a TSIG or the Header section. Note that this shouldn't
+ /// happen with parameters as defined in the standard protocols,
+ /// so it's more likely a program bug.
+ /// \exception Unexpected Rendering the TSIG RR fails. The implementation
+ /// internally makes sure this doesn't happen, so if that ever occurs
+ /// it should mean a bug either in the TSIG context or in the renderer
+ /// implementation.
+ ///
/// \param renderer See the other version
/// \param tsig_ctx A TSIG context that is to be used for signing the
/// message
void toWire(AbstractMessageRenderer& renderer, TSIGContext& tsig_ctx);
+ /// Parse options.
+ ///
+    /// Note that the PRESERVE_ORDER option does not affect EDNS or TSIG RRs.
+ ///
+ /// The option values are used as a parameter for \c fromWire().
+ /// These are values of a bitmask type. Bitwise operations can be
+ /// performed on these values to express compound options.
+ enum ParseOptions {
+ PARSE_DEFAULT = 0, ///< The default options
+ PRESERVE_ORDER = 1 ///< Preserve RR order and don't combine them
+ };
+
/// \brief Parse the header section of the \c Message.
void parseHeader(isc::util::InputBuffer& buffer);
- /// \brief Parse the \c Message.
- void fromWire(isc::util::InputBuffer& buffer);
+ /// \brief (Re)build a \c Message object from wire-format data.
+ ///
+ /// This method parses the given wire format data to build a
+ /// complete Message object. On success, the values of the header section
+ /// fields can be accessible via corresponding get methods, and the
+ /// question and following sections can be accessible via the
+ /// corresponding iterators. If the message contains an EDNS or TSIG,
+ /// they can be accessible via \c getEDNS() and \c getTSIGRecord(),
+ /// respectively.
+ ///
+ /// This \c Message must be in the \c PARSE mode.
+ ///
+ /// This method performs strict validation on the given message based
+ /// on the DNS protocol specifications. If the given message data is
+ /// invalid, this method throws an exception (see the exception list).
+ ///
+ /// By default, this method combines RRs of the same name, RR type and
+ /// RR class in a section into a single RRset, even if they are interleaved
+ /// with a different type of RR (though it would be a rare case in
+ /// practice). If the \c PRESERVE_ORDER option is specified, it handles
+ /// each RR separately, in the appearing order, and converts it to a
+ /// separate RRset (so this RRset should contain exactly one Rdata).
+ /// This mode will be necessary when the higher level protocol is
+ /// ordering conscious. For example, in AXFR and IXFR, the position of
+ /// the SOA RRs are crucial.
+ ///
+ /// \exception InvalidMessageOperation \c Message is in the RENDER mode
+    /// \exception DNSMessageFORMERR The given message data is syntactically invalid
+ /// \exception MessageTooShort The given data is shorter than a valid
+ /// header section
+ /// \exception std::bad_alloc Memory allocation failure
+ /// \exception Others \c Name, \c Rdata, and \c EDNS classes can also throw
+ ///
+ /// \param buffer A input buffer object that stores the wire data
+ /// \param options Parse options
+ void fromWire(isc::util::InputBuffer& buffer, ParseOptions options
+ = PARSE_DEFAULT);
///
/// \name Protocol constants
@@ -618,6 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Message& message);
}
#endif // __MESSAGE_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 6c4ef54..3b89358 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -4,40 +4,47 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
-pyexec_LTLIBRARIES = pydnspp.la
-pydnspp_la_SOURCES = pydnspp.cc pydnspp_common.cc pydnspp_towire.h
-pydnspp_la_SOURCES += name_python.cc name_python.h
-pydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
-pydnspp_la_SOURCES += rcode_python.cc rcode_python.h
-pydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
-pydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
-pydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
-pydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
-pydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+lib_LTLIBRARIES = libpydnspp.la
+libpydnspp_la_SOURCES = pydnspp_common.cc pydnspp_common.h pydnspp_towire.h
+libpydnspp_la_SOURCES += name_python.cc name_python.h
+libpydnspp_la_SOURCES += rrset_python.cc rrset_python.h
+libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
+libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
+libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
+libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
+libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
+libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
+libpydnspp_la_SOURCES += question_python.cc question_python.h
+libpydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
+libpydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
+libpydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
+libpydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
+libpydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+libpydnspp_la_SOURCES += edns_python.cc edns_python.h
+libpydnspp_la_SOURCES += message_python.cc message_python.h
+
+libpydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+libpydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+libpydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
+
+
+pyexec_LTLIBRARIES = pydnspp.la
+pydnspp_la_SOURCES = pydnspp.cc
pydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
# placed after -Wextra defined in AM_CXXFLAGS
pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
-# directly included from source files, so these don't have their own
-# rules
-EXTRA_DIST = pydnspp_common.h
-EXTRA_DIST += edns_python.cc
-EXTRA_DIST += message_python.cc
-EXTRA_DIST += rrclass_python.cc
-EXTRA_DIST += opcode_python.cc
-EXTRA_DIST += rrset_python.cc
-EXTRA_DIST += question_python.cc
-EXTRA_DIST += rrttl_python.cc
-EXTRA_DIST += rdata_python.cc
-EXTRA_DIST += rrtype_python.cc
-EXTRA_DIST += tsigerror_python_inc.cc
+EXTRA_DIST = tsigerror_python_inc.cc
+EXTRA_DIST += message_python_inc.cc
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
pydnspp_la_LDFLAGS += -module
pydnspp_la_LIBADD = $(top_builddir)/src/lib/dns/libdns++.la
pydnspp_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+pydnspp_la_LIBADD += libpydnspp.la
pydnspp_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/dns/python/edns_python.cc b/src/lib/dns/python/edns_python.cc
index 83c3bfa..8f0f1a4 100644
--- a/src/lib/dns/python/edns_python.cc
+++ b/src/lib/dns/python/edns_python.cc
@@ -12,38 +12,38 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
+
#include <cassert>
#include <dns/edns.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "edns_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace isc::dns;
-using namespace isc::util;
using namespace isc::dns::rdata;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// EDNS
-//
-
-// The s_* Class simply covers one instantiation of the object
class s_EDNS : public PyObject {
public:
- EDNS* edns;
+ EDNS* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
+typedef CPPPyObjectContainer<s_EDNS, EDNS> EDNSContainer;
// General creation and destruction
int EDNS_init(s_EDNS* self, PyObject* args);
@@ -103,60 +103,6 @@ PyMethodDef EDNS_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_EDNS
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject edns_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.EDNS",
- sizeof(s_EDNS), // tp_basicsize
- 0, // tp_itemsize
- (destructor)EDNS_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- EDNS_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The EDNS class encapsulates DNS extensions "
- "provided by the EDNSx protocol.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- EDNS_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)EDNS_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
EDNS*
createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
const RRTTL& rrttl, const Rdata& rdata, uint8_t& extended_rcode)
@@ -179,15 +125,15 @@ createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
int
EDNS_init(s_EDNS* self, PyObject* args) {
uint8_t version = EDNS::SUPPORTED_VERSION;
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
if (PyArg_ParseTuple(args, "|b", &version)) {
try {
- self->edns = new EDNS(version);
+ self->cppobj = new EDNS(version);
} catch (const isc::InvalidParameter& ex) {
PyErr_SetString(po_InvalidParameter, ex.what());
return (-1);
@@ -203,10 +149,12 @@ EDNS_init(s_EDNS* self, PyObject* args) {
// in this context so that we can share the try-catch logic with
// EDNS_createFromRR() (see below).
uint8_t extended_rcode;
- self->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- return (self->edns != NULL ? 0 : -1);
+ self->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata), extended_rcode);
+ return (self->cppobj != NULL ? 0 : -1);
}
PyErr_Clear();
@@ -217,19 +165,19 @@ EDNS_init(s_EDNS* self, PyObject* args) {
void
EDNS_destroy(s_EDNS* const self) {
- delete self->edns;
- self->edns = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
EDNS_toText(const s_EDNS* const self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->edns->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
-EDNS_str(PyObject* const self) {
+EDNS_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
const_cast<char*>("to_text"),
@@ -240,14 +188,14 @@ PyObject*
EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject* bytes;
uint8_t extended_rcode;
- s_MessageRenderer* renderer;
+ PyObject* renderer;
if (PyArg_ParseTuple(args, "Ob", &bytes, &extended_rcode) &&
PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(0);
- self->edns->toWire(buffer, extended_rcode);
+ self->cppobj->toWire(buffer, extended_rcode);
PyObject* rd_bytes = PyBytes_FromStringAndSize(
static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
@@ -257,8 +205,8 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
return (result);
} else if (PyArg_ParseTuple(args, "O!b", &messagerenderer_type,
&renderer, &extended_rcode)) {
- const unsigned int n = self->edns->toWire(*renderer->messagerenderer,
- extended_rcode);
+ const unsigned int n = self->cppobj->toWire(
+ PyMessageRenderer_ToMessageRenderer(renderer), extended_rcode);
return (Py_BuildValue("I", n));
}
@@ -269,12 +217,12 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
PyObject*
EDNS_getVersion(const s_EDNS* const self) {
- return (Py_BuildValue("B", self->edns->getVersion()));
+ return (Py_BuildValue("B", self->cppobj->getVersion()));
}
PyObject*
EDNS_getDNSSECAwareness(const s_EDNS* const self) {
- if (self->edns->getDNSSECAwareness()) {
+ if (self->cppobj->getDNSSECAwareness()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -287,13 +235,13 @@ EDNS_setDNSSECAwareness(s_EDNS* self, PyObject* args) {
if (!PyArg_ParseTuple(args, "O!", &PyBool_Type, &b)) {
return (NULL);
}
- self->edns->setDNSSECAwareness(b == Py_True);
+ self->cppobj->setDNSSECAwareness(b == Py_True);
Py_RETURN_NONE;
}
PyObject*
EDNS_getUDPSize(const s_EDNS* const self) {
- return (Py_BuildValue("I", self->edns->getUDPSize()));
+ return (Py_BuildValue("I", self->cppobj->getUDPSize()));
}
PyObject*
@@ -310,17 +258,17 @@ EDNS_setUDPSize(s_EDNS* self, PyObject* args) {
"UDP size is not an unsigned 16-bit integer");
return (NULL);
}
- self->edns->setUDPSize(size);
+ self->cppobj->setUDPSize(size);
Py_RETURN_NONE;
}
PyObject*
EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
- const s_Name* name;
- const s_RRClass* rrclass;
- const s_RRType* rrtype;
- const s_RRTTL* rrttl;
- const s_Rdata* rdata;
+ const PyObject* name;
+ const PyObject* rrclass;
+ const PyObject* rrtype;
+ const PyObject* rrttl;
+ const PyObject* rdata;
s_EDNS* edns_obj = NULL;
assert(null_self == NULL);
@@ -334,14 +282,17 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
return (NULL);
}
- edns_obj->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl,
- *rdata->rdata, extended_rcode);
- if (edns_obj->edns != NULL) {
+ edns_obj->cppobj = createFromRR(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl),
+ PyRdata_ToRdata(rdata),
+ extended_rcode);
+ if (edns_obj->cppobj != NULL) {
PyObject* extrcode_obj = Py_BuildValue("B", extended_rcode);
return (Py_BuildValue("OO", edns_obj, extrcode_obj));
}
-
+
Py_DECREF(edns_obj);
return (NULL);
}
@@ -353,23 +304,90 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
}
} // end of anonymous namespace
-// end of EDNS
-// Module Initialization, all statics are initialized here
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_EDNS
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject edns_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.EDNS",
+ sizeof(s_EDNS), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)EDNS_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ EDNS_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The EDNS class encapsulates DNS extensions "
+ "provided by the EDNSx protocol.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ EDNS_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)EDNS_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createEDNSObject(const EDNS& source) {
+ EDNSContainer container(PyObject_New(s_EDNS, &edns_type));
+ container.set(new EDNS(source));
+ return (container.release());
+}
+
bool
-initModulePart_EDNS(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&edns_type) < 0) {
- return (false);
+PyEDNS_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&edns_type);
- void* p = &edns_type;
- PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
-
- addClassVariable(edns_type, "SUPPORTED_VERSION",
- Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
+ return (PyObject_TypeCheck(obj, &edns_type));
+}
- return (true);
+const EDNS&
+PyEDNS_ToEDNS(const PyObject* edns_obj) {
+ if (edns_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in EDNS PyObject conversion");
+ }
+ const s_EDNS* edns = static_cast<const s_EDNS*>(edns_obj);
+ return (*edns->cppobj);
}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/edns_python.h b/src/lib/dns/python/edns_python.h
new file mode 100644
index 0000000..30d92ab
--- /dev/null
+++ b/src/lib/dns/python/edns_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_EDNS_H
+#define __PYTHON_EDNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class EDNS;
+
+namespace python {
+
+extern PyTypeObject edns_type;
+
+/// This is a simple shortcut to create a python EDNS object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createEDNSObject(const EDNS& source);
+
+/// \brief Checks if the given python object is a EDNS object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type EDNS, false otherwise
+bool PyEDNS_Check(PyObject* obj);
+
+/// \brief Returns a reference to the EDNS object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type EDNS; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyEDNS_Check()
+///
+/// \note This is not a copy; if the EDNS is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param edns_obj The edns object to convert
+const EDNS& PyEDNS_ToEDNS(const PyObject* edns_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_EDNS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index 2842588..2349401 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -12,49 +12,42 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
#include <exceptions/exceptions.h>
#include <dns/message.h>
#include <dns/rcode.h>
#include <dns/tsig.h>
-
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "question_python.h"
+#include "edns_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "rrset_python.h"
+#include "message_python.h"
+#include "messagerenderer_python.h"
+#include "tsig_python.h"
+#include "tsigrecord_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
-namespace {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-PyObject* po_MessageTooShort;
-PyObject* po_InvalidMessageSection;
-PyObject* po_InvalidMessageOperation;
-PyObject* po_InvalidMessageUDPSize;
-
-//
-// Definition of the classes
-//
+// Import pydoc text
+#include "message_python_inc.cc"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Message
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Message : public PyObject {
public:
- Message* message;
+ isc::dns::Message* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
int Message_init(s_Message* self, PyObject* args);
void Message_destroy(s_Message* self);
@@ -85,7 +78,7 @@ PyObject* Message_makeResponse(s_Message* self);
PyObject* Message_toText(s_Message* self);
PyObject* Message_str(PyObject* self);
PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(s_Message* self, PyObject* args);
+PyObject* Message_fromWire(PyObject* pyself, PyObject* args);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -167,70 +160,10 @@ PyMethodDef Message_methods[] = {
"If the given message is not in RENDER mode, an "
"InvalidMessageOperation is raised.\n"
},
- { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS,
- "Parses the given wire format to a Message object.\n"
- "The first argument is a Message to parse the data into.\n"
- "The second argument must implement the buffer interface.\n"
- "If the given message is not in PARSE mode, an "
- "InvalidMessageOperation is raised.\n"
- "Raises MessageTooShort, DNSMessageFORMERR or DNSMessageBADVERS "
- " if there is a problem parsing the message." },
+ { "from_wire", Message_fromWire, METH_VARARGS, Message_fromWire_doc },
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Message
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject message_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Message",
- sizeof(s_Message), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Message_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Message_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Message class encapsulates a standard DNS message.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Message_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Message_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Message_init(s_Message* self, PyObject* args) {
int i;
@@ -238,10 +171,10 @@ Message_init(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message = new Message(Message::PARSE);
+ self->cppobj = new Message(Message::PARSE);
return (0);
} else if (i == Message::RENDER) {
- self->message = new Message(Message::RENDER);
+ self->cppobj = new Message(Message::RENDER);
return (0);
} else {
PyErr_SetString(PyExc_TypeError, "Message mode must be Message.PARSE or Message.RENDER");
@@ -256,8 +189,8 @@ Message_init(s_Message* self, PyObject* args) {
void
Message_destroy(s_Message* self) {
- delete self->message;
- self->message = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
@@ -271,7 +204,7 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) {
return (NULL);
}
- if (self->message->getHeaderFlag(
+ if (self->cppobj->getHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag))) {
Py_RETURN_TRUE;
} else {
@@ -296,7 +229,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
}
try {
- self->message->setHeaderFlag(
+ self->cppobj->setHeaderFlag(
static_cast<Message::HeaderFlag>(messageflag), on == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
@@ -312,7 +245,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
PyObject*
Message_getQid(s_Message* self) {
- return (Py_BuildValue("I", self->message->getQid()));
+ return (Py_BuildValue("I", self->cppobj->getQid()));
}
PyObject*
@@ -331,7 +264,7 @@ Message_setQid(s_Message* self, PyObject* args) {
}
try {
- self->message->setQid(id);
+ self->cppobj->setQid(id);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -341,35 +274,25 @@ Message_setQid(s_Message* self, PyObject* args) {
PyObject*
Message_getRcode(s_Message* self) {
- s_Rcode* rcode;
-
- rcode = static_cast<s_Rcode*>(rcode_type.tp_alloc(&rcode_type, 0));
- if (rcode != NULL) {
- rcode->cppobj = NULL;
- try {
- rcode->cppobj = new Rcode(self->message->getRcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (rcode->cppobj == NULL) {
- Py_DECREF(rcode);
- return (NULL);
- }
+ try {
+ return (createRcodeObject(self->cppobj->getRcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException, "Unexpected exception");
+ return (NULL);
}
-
- return (rcode);
}
PyObject*
Message_setRcode(s_Message* self, PyObject* args) {
- s_Rcode* rcode;
+ PyObject* rcode;
if (!PyArg_ParseTuple(args, "O!", &rcode_type, &rcode)) {
return (NULL);
}
try {
- self->message->setRcode(*rcode->cppobj);
+ self->cppobj->setRcode(PyRcode_ToRcode(rcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -379,35 +302,31 @@ Message_setRcode(s_Message* self, PyObject* args) {
PyObject*
Message_getOpcode(s_Message* self) {
- s_Opcode* opcode;
-
- opcode = static_cast<s_Opcode*>(opcode_type.tp_alloc(&opcode_type, 0));
- if (opcode != NULL) {
- opcode->opcode = NULL;
- try {
- opcode->opcode = new Opcode(self->message->getOpcode());
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- } catch (...) {
- PyErr_SetString(po_IscException, "Unexpected exception");
- }
- if (opcode->opcode == NULL) {
- Py_DECREF(opcode);
- return (NULL);
- }
+ try {
+ return (createOpcodeObject(self->cppobj->getOpcode()));
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get message opcode: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(po_IscException,
+ "Unexpected exception getting opcode from message");
+ return (NULL);
}
-
- return (opcode);
}
PyObject*
Message_setOpcode(s_Message* self, PyObject* args) {
- s_Opcode* opcode;
+ PyObject* opcode;
if (!PyArg_ParseTuple(args, "O!", &opcode_type, &opcode)) {
return (NULL);
}
try {
- self->message->setOpcode(*opcode->opcode);
+ self->cppobj->setOpcode(PyOpcode_ToOpcode(opcode));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -417,32 +336,31 @@ Message_setOpcode(s_Message* self, PyObject* args) {
PyObject*
Message_getEDNS(s_Message* self) {
- s_EDNS* edns;
- EDNS* edns_body;
- ConstEDNSPtr src = self->message->getEDNS();
-
+ ConstEDNSPtr src = self->cppobj->getEDNS();
if (!src) {
Py_RETURN_NONE;
}
- if ((edns_body = new(nothrow) EDNS(*src)) == NULL) {
- return (PyErr_NoMemory());
- }
- edns = static_cast<s_EDNS*>(opcode_type.tp_alloc(&edns_type, 0));
- if (edns != NULL) {
- edns->edns = edns_body;
+ try {
+ return (createEDNSObject(*src));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to get EDNS from message: " + string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting EDNS from message");
}
-
- return (edns);
+ return (NULL);
}
PyObject*
Message_setEDNS(s_Message* self, PyObject* args) {
- s_EDNS* edns;
+ PyObject* edns;
if (!PyArg_ParseTuple(args, "O!", &edns_type, &edns)) {
return (NULL);
}
try {
- self->message->setEDNS(EDNSPtr(new EDNS(*edns->edns)));
+ self->cppobj->setEDNS(EDNSPtr(new EDNS(PyEDNS_ToEDNS(edns))));
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -453,7 +371,7 @@ Message_setEDNS(s_Message* self, PyObject* args) {
PyObject*
Message_getTSIGRecord(s_Message* self) {
try {
- const TSIGRecord* tsig_record = self->message->getTSIGRecord();
+ const TSIGRecord* tsig_record = self->cppobj->getTSIGRecord();
if (tsig_record == NULL) {
Py_RETURN_NONE;
@@ -483,7 +401,7 @@ Message_getRRCount(s_Message* self, PyObject* args) {
return (NULL);
}
try {
- return (Py_BuildValue("I", self->message->getRRCount(
+ return (Py_BuildValue("I", self->cppobj->getRRCount(
static_cast<Message::Section>(section))));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -496,8 +414,8 @@ PyObject*
Message_getQuestion(s_Message* self) {
QuestionIterator qi, qi_end;
try {
- qi = self->message->beginQuestion();
- qi_end = self->message->endQuestion();
+ qi = self->cppobj->beginQuestion();
+ qi_end = self->cppobj->endQuestion();
} catch (const InvalidMessageSection& ex) {
PyErr_SetString(po_InvalidMessageSection, ex.what());
return (NULL);
@@ -512,23 +430,25 @@ Message_getQuestion(s_Message* self) {
return (NULL);
}
- for (; qi != qi_end; ++qi) {
- s_Question *question = static_cast<s_Question*>(
- question_type.tp_alloc(&question_type, 0));
- if (question == NULL) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
- }
- question->question = *qi;
- if (PyList_Append(list, question) == -1) {
- Py_DECREF(question);
- Py_DECREF(list);
- return (NULL);
+ try {
+ for (; qi != qi_end; ++qi) {
+ if (PyList_Append(list, createQuestionObject(**qi)) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
- Py_DECREF(question);
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting Question section: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting Question section");
}
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
PyObject*
@@ -542,9 +462,9 @@ Message_getSection(s_Message* self, PyObject* args) {
}
RRsetIterator rrsi, rrsi_end;
try {
- rrsi = self->message->beginSection(
+ rrsi = self->cppobj->beginSection(
static_cast<Message::Section>(section));
- rrsi_end = self->message->endSection(
+ rrsi_end = self->cppobj->endSection(
static_cast<Message::Section>(section));
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -562,25 +482,25 @@ Message_getSection(s_Message* self, PyObject* args) {
if (list == NULL) {
return (NULL);
}
- for (; rrsi != rrsi_end; ++rrsi) {
- s_RRset *rrset = static_cast<s_RRset*>(
- rrset_type.tp_alloc(&rrset_type, 0));
- if (rrset == NULL) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
- }
- rrset->rrset = *rrsi;
- if (PyList_Append(list, rrset) == -1) {
- Py_DECREF(rrset);
- Py_DECREF(list);
- return (NULL);
+ try {
+ for (; rrsi != rrsi_end; ++rrsi) {
+ if (PyList_Append(list, createRRsetObject(**rrsi)) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
- // PyList_Append increases refcount, so we remove ours since
- // we don't need it anymore
- Py_DECREF(rrset);
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure creating Question object: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure creating Question object");
}
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
//static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
@@ -590,14 +510,14 @@ Message_getSection(s_Message* self, PyObject* args) {
//static PyObject* Message_addQuestion(s_Message* self, PyObject* args);
PyObject*
Message_addQuestion(s_Message* self, PyObject* args) {
- s_Question *question;
+ PyObject* question;
if (!PyArg_ParseTuple(args, "O!", &question_type, &question)) {
return (NULL);
}
- self->message->addQuestion(question->question);
-
+ self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
+
Py_RETURN_NONE;
}
@@ -605,15 +525,15 @@ PyObject*
Message_addRRset(s_Message* self, PyObject* args) {
PyObject *sign = Py_False;
int section;
- s_RRset* rrset;
+ PyObject* rrset;
    if (!PyArg_ParseTuple(args, "iO!|O!", &section, &rrset_type, &rrset,
&PyBool_Type, &sign)) {
return (NULL);
}
try {
- self->message->addRRset(static_cast<Message::Section>(section),
- rrset->rrset, sign == Py_True);
+ self->cppobj->addRRset(static_cast<Message::Section>(section),
+ PyRRset_ToRRsetPtr(rrset), sign == Py_True);
Py_RETURN_NONE;
} catch (const InvalidMessageOperation& imo) {
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -634,10 +554,10 @@ Message_clear(s_Message* self, PyObject* args) {
if (PyArg_ParseTuple(args, "i", &i)) {
PyErr_Clear();
if (i == Message::PARSE) {
- self->message->clear(Message::PARSE);
+ self->cppobj->clear(Message::PARSE);
Py_RETURN_NONE;
} else if (i == Message::RENDER) {
- self->message->clear(Message::RENDER);
+ self->cppobj->clear(Message::RENDER);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -651,7 +571,7 @@ Message_clear(s_Message* self, PyObject* args) {
PyObject*
Message_makeResponse(s_Message* self) {
- self->message->makeResponse();
+ self->cppobj->makeResponse();
Py_RETURN_NONE;
}
@@ -659,7 +579,7 @@ PyObject*
Message_toText(s_Message* self) {
// Py_BuildValue makes python objects from native data
try {
- return (Py_BuildValue("s", self->message->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const InvalidMessageOperation& imo) {
PyErr_Clear();
PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -680,16 +600,17 @@ Message_str(PyObject* self) {
PyObject*
Message_toWire(s_Message* self, PyObject* args) {
- s_MessageRenderer* mr;
- s_TSIGContext* tsig_ctx = NULL;
-
+ PyObject* mr;
+ PyObject* tsig_ctx = NULL;
+
if (PyArg_ParseTuple(args, "O!|O!", &messagerenderer_type, &mr,
&tsigcontext_type, &tsig_ctx)) {
try {
if (tsig_ctx == NULL) {
- self->message->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
} else {
- self->message->toWire(*mr->messagerenderer, *tsig_ctx->cppobj);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr),
+ PyTSIGContext_ToTSIGContext(tsig_ctx));
}
// If we return NULL it is seen as an error, so use this for
// None returns
@@ -703,6 +624,15 @@ Message_toWire(s_Message* self, PyObject* args) {
// python program has a bug.
PyErr_SetString(po_TSIGContextError, ex.what());
return (NULL);
+ } catch (const std::exception& ex) {
+ // Other exceptions should be rare (most likely an implementation
+ // bug)
+ PyErr_SetString(po_TSIGContextError, ex.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected C++ exception in Message.to_wire");
+ return (NULL);
}
}
PyErr_Clear();
@@ -712,97 +642,125 @@ Message_toWire(s_Message* self, PyObject* args) {
}
PyObject*
-Message_fromWire(s_Message* self, PyObject* args) {
+Message_fromWire(PyObject* pyself, PyObject* args) {
+ s_Message* const self = static_cast<s_Message*>(pyself);
const char* b;
Py_ssize_t len;
- if (!PyArg_ParseTuple(args, "y#", &b, &len)) {
- return (NULL);
- }
-
- InputBuffer inbuf(b, len);
- try {
- self->message->fromWire(inbuf);
- Py_RETURN_NONE;
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
- } catch (const DNSMessageFORMERR& dmfe) {
- PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
- return (NULL);
- } catch (const DNSMessageBADVERS& dmfe) {
- PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
- return (NULL);
- } catch (const MessageTooShort& mts) {
- PyErr_SetString(po_MessageTooShort, mts.what());
- return (NULL);
+ unsigned int options = Message::PARSE_DEFAULT;
+
+ if (PyArg_ParseTuple(args, "y#", &b, &len) ||
+ PyArg_ParseTuple(args, "y#I", &b, &len, &options)) {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
+
+ InputBuffer inbuf(b, len);
+ try {
+ self->cppobj->fromWire(
+ inbuf, static_cast<Message::ParseOptions>(options));
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const DNSMessageFORMERR& dmfe) {
+ PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+ return (NULL);
+ } catch (const DNSMessageBADVERS& dmfe) {
+ PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
+ return (NULL);
+ } catch (const MessageTooShort& mts) {
+ PyErr_SetString(po_MessageTooShort, mts.what());
+ return (NULL);
+ } catch (const InvalidBufferPosition& ex) {
+ PyErr_SetString(po_DNSMessageFORMERR, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Error in Message.from_wire: " + string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in Message.from_wire");
+ return (NULL);
+ }
}
-}
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Message(PyObject* mod) {
- if (PyType_Ready(&message_type) < 0) {
- return (false);
- }
- Py_INCREF(&message_type);
-
- // Class variables
- // These are added to the tp_dict of the type object
- //
- addClassVariable(message_type, "PARSE",
- Py_BuildValue("I", Message::PARSE));
- addClassVariable(message_type, "RENDER",
- Py_BuildValue("I", Message::RENDER));
-
- addClassVariable(message_type, "HEADERFLAG_QR",
- Py_BuildValue("I", Message::HEADERFLAG_QR));
- addClassVariable(message_type, "HEADERFLAG_AA",
- Py_BuildValue("I", Message::HEADERFLAG_AA));
- addClassVariable(message_type, "HEADERFLAG_TC",
- Py_BuildValue("I", Message::HEADERFLAG_TC));
- addClassVariable(message_type, "HEADERFLAG_RD",
- Py_BuildValue("I", Message::HEADERFLAG_RD));
- addClassVariable(message_type, "HEADERFLAG_RA",
- Py_BuildValue("I", Message::HEADERFLAG_RA));
- addClassVariable(message_type, "HEADERFLAG_AD",
- Py_BuildValue("I", Message::HEADERFLAG_AD));
- addClassVariable(message_type, "HEADERFLAG_CD",
- Py_BuildValue("I", Message::HEADERFLAG_CD));
-
- addClassVariable(message_type, "SECTION_QUESTION",
- Py_BuildValue("I", Message::SECTION_QUESTION));
- addClassVariable(message_type, "SECTION_ANSWER",
- Py_BuildValue("I", Message::SECTION_ANSWER));
- addClassVariable(message_type, "SECTION_AUTHORITY",
- Py_BuildValue("I", Message::SECTION_AUTHORITY));
- addClassVariable(message_type, "SECTION_ADDITIONAL",
- Py_BuildValue("I", Message::SECTION_ADDITIONAL));
-
- addClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
- Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
-
- /* Class-specific exceptions */
- po_MessageTooShort = PyErr_NewException("pydnspp.MessageTooShort", NULL,
- NULL);
- PyModule_AddObject(mod, "MessageTooShort", po_MessageTooShort);
- po_InvalidMessageSection =
- PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageSection", po_InvalidMessageSection);
- po_InvalidMessageOperation =
- PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageOperation",
- po_InvalidMessageOperation);
- po_InvalidMessageUDPSize =
- PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageUDPSize", po_InvalidMessageUDPSize);
- po_DNSMessageBADVERS = PyErr_NewException("pydnspp.DNSMessageBADVERS",
- NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageBADVERS", po_DNSMessageBADVERS);
-
- PyModule_AddObject(mod, "Message",
- reinterpret_cast<PyObject*>(&message_type));
-
-
- return (true);
+ PyErr_SetString(PyExc_TypeError,
+ "from_wire() arguments must be a byte object and "
+ "(optional) parse options");
+ return (NULL);
}
+
} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_MessageTooShort;
+PyObject* po_InvalidMessageSection;
+PyObject* po_InvalidMessageOperation;
+PyObject* po_InvalidMessageUDPSize;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Message
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject message_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Message",
+ sizeof(s_Message), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Message_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Message_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Message class encapsulates a standard DNS message.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Message_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Message_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/message_python.h b/src/lib/dns/python/message_python.h
new file mode 100644
index 0000000..be23890
--- /dev/null
+++ b/src/lib/dns/python/message_python.h
@@ -0,0 +1,40 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_MESSAGE_H
+#define __PYTHON_MESSAGE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Message;
+
+namespace python {
+
+extern PyObject* po_MessageTooShort;
+extern PyObject* po_InvalidMessageSection;
+extern PyObject* po_InvalidMessageOperation;
+extern PyObject* po_InvalidMessageUDPSize;
+
+extern PyTypeObject message_type;
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_MESSAGE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc
new file mode 100644
index 0000000..561c494
--- /dev/null
+++ b/src/lib/dns/python/message_python_inc.cc
@@ -0,0 +1,41 @@
+namespace {
+const char* const Message_fromWire_doc = "\
+from_wire(data, options=PARSE_DEFAULT)\n\
+\n\
+(Re)build a Message object from wire-format data.\n\
+\n\
+This method parses the given wire format data to build a complete\n\
+Message object. On success, the values of the header section fields\n\
+can be accessible via corresponding get methods, and the question and\n\
+following sections can be accessible via the corresponding iterators.\n\
+If the message contains an EDNS or TSIG, they can be accessible via\n\
+get_edns() and get_tsig_record(), respectively.\n\
+\n\
+This Message must be in the PARSE mode.\n\
+\n\
+This method performs strict validation on the given message based on\n\
+the DNS protocol specifications. If the given message data is invalid,\n\
+this method throws an exception (see the exception list).\n\
+\n\
+By default, this method combines RRs of the same name, RR type and RR\n\
+class in a section into a single RRset, even if they are interleaved\n\
+with a different type of RR (though it would be a rare case in\n\
+practice). If the PRESERVE_ORDER option is specified, it handles each\n\
+RR separately, in the appearing order, and converts it to a separate\n\
+RRset (so this RRset should contain exactly one Rdata). This mode will\n\
+be necessary when the higher level protocol is ordering conscious. For\n\
+example, in AXFR and IXFR, the position of the SOA RRs are crucial.\n\
+\n\
+Exceptions:\n\
+ InvalidMessageOperation Message is in the RENDER mode\n\
+ DNSMessageFORMERR The given message data is syntactically invalid\n\
+ MessageTooShort The given data is shorter than a valid header\n\
+ section\n\
+ Others Name, Rdata, and EDNS classes can also throw\n\
+\n\
+Parameters:\n\
+ data A byte object of the wire data\n\
+ options Parse options\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/dns/python/messagerenderer_python.cc b/src/lib/dns/python/messagerenderer_python.cc
index e6f5d3e..bb89622 100644
--- a/src/lib/dns/python/messagerenderer_python.cc
+++ b/src/lib/dns/python/messagerenderer_python.cc
@@ -17,6 +17,7 @@
#include <util/buffer.h>
#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
#include "messagerenderer_python.h"
@@ -24,15 +25,21 @@
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
-
-// MessageRenderer
-
-s_MessageRenderer::s_MessageRenderer() : outputbuffer(NULL),
- messagerenderer(NULL)
-{
-}
+using namespace isc::util::python;
namespace {
+// The s_* Class simply covers one instantiation of the object.
+//
+// since we don't use *Buffer in the python version (but work with
+// the already existing bytearray type where we use these custom buffers
+// in C++, we need to keep track of one here.
+class s_MessageRenderer : public PyObject {
+public:
+ s_MessageRenderer();
+ isc::util::OutputBuffer* outputbuffer;
+ MessageRenderer* cppobj;
+};
+
int MessageRenderer_init(s_MessageRenderer* self);
void MessageRenderer_destroy(s_MessageRenderer* self);
@@ -72,15 +79,15 @@ PyMethodDef MessageRenderer_methods[] = {
int
MessageRenderer_init(s_MessageRenderer* self) {
self->outputbuffer = new OutputBuffer(4096);
- self->messagerenderer = new MessageRenderer(*self->outputbuffer);
+ self->cppobj = new MessageRenderer(*self->outputbuffer);
return (0);
}
void
MessageRenderer_destroy(s_MessageRenderer* self) {
- delete self->messagerenderer;
+ delete self->cppobj;
delete self->outputbuffer;
- self->messagerenderer = NULL;
+ self->cppobj = NULL;
self->outputbuffer = NULL;
Py_TYPE(self)->tp_free(self);
}
@@ -88,18 +95,18 @@ MessageRenderer_destroy(s_MessageRenderer* self) {
PyObject*
MessageRenderer_getData(s_MessageRenderer* self) {
return (Py_BuildValue("y#",
- self->messagerenderer->getData(),
- self->messagerenderer->getLength()));
+ self->cppobj->getData(),
+ self->cppobj->getLength()));
}
PyObject*
MessageRenderer_getLength(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLength()));
+ return (Py_BuildValue("I", self->cppobj->getLength()));
}
PyObject*
MessageRenderer_isTruncated(s_MessageRenderer* self) {
- if (self->messagerenderer->isTruncated()) {
+ if (self->cppobj->isTruncated()) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -108,17 +115,17 @@ MessageRenderer_isTruncated(s_MessageRenderer* self) {
PyObject*
MessageRenderer_getLengthLimit(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getLengthLimit()));
+ return (Py_BuildValue("I", self->cppobj->getLengthLimit()));
}
PyObject*
MessageRenderer_getCompressMode(s_MessageRenderer* self) {
- return (Py_BuildValue("I", self->messagerenderer->getCompressMode()));
+ return (Py_BuildValue("I", self->cppobj->getCompressMode()));
}
PyObject*
MessageRenderer_setTruncated(s_MessageRenderer* self) {
- self->messagerenderer->setTruncated();
+ self->cppobj->setTruncated();
Py_RETURN_NONE;
}
@@ -138,7 +145,7 @@ MessageRenderer_setLengthLimit(s_MessageRenderer* self,
"MessageRenderer length limit out of range");
return (NULL);
}
- self->messagerenderer->setLengthLimit(lengthlimit);
+ self->cppobj->setLengthLimit(lengthlimit);
Py_RETURN_NONE;
}
@@ -152,12 +159,12 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
}
if (mode == MessageRenderer::CASE_INSENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
// If we return NULL it is seen as an error, so use this for
// None returns, it also applies to CASE_SENSITIVE.
Py_RETURN_NONE;
} else if (mode == MessageRenderer::CASE_SENSITIVE) {
- self->messagerenderer->setCompressMode(MessageRenderer::CASE_SENSITIVE);
+ self->cppobj->setCompressMode(MessageRenderer::CASE_SENSITIVE);
Py_RETURN_NONE;
} else {
PyErr_SetString(PyExc_TypeError,
@@ -169,12 +176,11 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
PyObject*
MessageRenderer_clear(s_MessageRenderer* self) {
- self->messagerenderer->clear();
+ self->cppobj->clear();
Py_RETURN_NONE;
}
} // end of unnamed namespace
-// end of MessageRenderer
namespace isc {
namespace dns {
namespace python {
@@ -233,37 +239,29 @@ PyTypeObject messagerenderer_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_MessageRenderer(PyObject* mod) {
- // Add the exceptions to the module
+// If we need a createMessageRendererObject(), should we copy? can we?
+// copy the existing buffer into a new one, then create a new renderer with
+// that buffer?
- // Add the enums to the module
-
- // Add the constants to the module
-
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
+bool
+PyMessageRenderer_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &messagerenderer_type));
+}
- // NameComparisonResult
- if (PyType_Ready(&messagerenderer_type) < 0) {
- return (false);
+MessageRenderer&
+PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj) {
+ if (messagerenderer_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in MessageRenderer PyObject conversion");
}
- Py_INCREF(&messagerenderer_type);
+ s_MessageRenderer* messagerenderer = static_cast<s_MessageRenderer*>(messagerenderer_obj);
+ return (*messagerenderer->cppobj);
+}
- // Class variables
- // These are added to the tp_dict of the type object
- addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
- addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
- Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
- PyModule_AddObject(mod, "MessageRenderer",
- reinterpret_cast<PyObject*>(&messagerenderer_type));
-
- return (true);
-}
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/messagerenderer_python.h b/src/lib/dns/python/messagerenderer_python.h
index 3bb096e..ea9a940 100644
--- a/src/lib/dns/python/messagerenderer_python.h
+++ b/src/lib/dns/python/messagerenderer_python.h
@@ -17,30 +17,35 @@
#include <Python.h>
+#include <util/buffer.h>
+
namespace isc {
-namespace util {
-class OutputBuffer;
-}
namespace dns {
class MessageRenderer;
namespace python {
-// The s_* Class simply covers one instantiation of the object.
-//
-// since we don't use *Buffer in the python version (but work with
-// the already existing bytearray type where we use these custom buffers
-// in C++, we need to keep track of one here.
-class s_MessageRenderer : public PyObject {
-public:
- s_MessageRenderer();
- isc::util::OutputBuffer* outputbuffer;
- MessageRenderer* messagerenderer;
-};
-
extern PyTypeObject messagerenderer_type;
-bool initModulePart_MessageRenderer(PyObject* mod);
+/// \brief Checks if the given python object is a MessageRenderer object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type MessageRenderer, false otherwise
+bool PyMessageRenderer_Check(PyObject* obj);
+
+/// \brief Returns a reference to the MessageRenderer object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type MessageRenderer; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyMessageRenderer_Check()
+///
+/// \note This is not a copy; if the MessageRenderer is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param messagerenderer_obj The messagerenderer object to convert
+MessageRenderer& PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index d00c6f7..4043445 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -25,20 +25,25 @@
#include "messagerenderer_python.h"
#include "name_python.h"
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
using namespace isc::util::python;
namespace {
-// NameComparisonResult
+// The s_* Class simply covers one instantiation of the object.
+class s_NameComparisonResult : public PyObject {
+public:
+ s_NameComparisonResult() : cppobj(NULL) {}
+ NameComparisonResult* cppobj;
+};
+
+class s_Name : public PyObject {
+public:
+ s_Name() : cppobj(NULL), position(0) {}
+ Name* cppobj;
+ size_t position;
+};
int NameComparisonResult_init(s_NameComparisonResult*, PyObject*);
void NameComparisonResult_destroy(s_NameComparisonResult* self);
@@ -84,9 +89,7 @@ PyObject*
NameComparisonResult_getRelation(s_NameComparisonResult* self) {
return (Py_BuildValue("I", self->cppobj->getRelation()));
}
-// end of NameComparisonResult
-// Name
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_Name, Name> NameContainer;
@@ -292,7 +295,7 @@ Name_str(PyObject* self) {
PyObject*
Name_toWire(s_Name* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
@@ -306,7 +309,7 @@ Name_toWire(s_Name* self, PyObject* args) {
Py_DECREF(name_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->cppobj->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -495,7 +498,7 @@ Name_isWildCard(s_Name* self) {
Py_RETURN_FALSE;
}
}
-// end of Name
+
} // end of unnamed namespace
namespace isc {
@@ -634,94 +637,32 @@ PyTypeObject name_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Name(PyObject* mod) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
-
- //
- // NameComparisonResult
- //
- if (PyType_Ready(&name_comparison_result_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_comparison_result_type);
-
- // Add the enums to the module
- po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
- NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
- NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
- NameComparisonResult::EQUAL, "EQUAL",
- NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
- addClassVariable(name_comparison_result_type, "NameRelation", po_NameRelation);
-
- PyModule_AddObject(mod, "NameComparisonResult",
- reinterpret_cast<PyObject*>(&name_comparison_result_type));
-
- //
- // Name
- //
-
- if (PyType_Ready(&name_type) < 0) {
- return (false);
- }
- Py_INCREF(&name_type);
-
- // Add the constants to the module
- addClassVariable(name_type, "MAX_WIRE", Py_BuildValue("I", Name::MAX_WIRE));
- addClassVariable(name_type, "MAX_LABELS", Py_BuildValue("I", Name::MAX_LABELS));
- addClassVariable(name_type, "MAX_LABELLEN", Py_BuildValue("I", Name::MAX_LABELLEN));
- addClassVariable(name_type, "MAX_COMPRESS_POINTER", Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK8", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
- addClassVariable(name_type, "COMPRESS_POINTER_MARK16", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
-
- s_Name* root_name = PyObject_New(s_Name, &name_type);
- root_name->cppobj = new Name(Name::ROOT_NAME());
- PyObject* po_ROOT_NAME = root_name;
- addClassVariable(name_type, "ROOT_NAME", po_ROOT_NAME);
-
- PyModule_AddObject(mod, "Name",
- reinterpret_cast<PyObject*>(&name_type));
-
-
- // Add the exceptions to the module
- po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
- PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
-
- po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
- PyModule_AddObject(mod, "TooLongName", po_TooLongName);
-
- po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
- PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
-
- po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
- PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
-
- po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
- PyModule_AddObject(mod, "BadEscape", po_BadEscape);
-
- po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
-
- po_InvalidBufferPosition = PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
- PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
-
- // This one could have gone into the message_python.cc file, but is
- // already needed here.
- po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR", NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
-
- return (true);
-}
-
PyObject*
createNameObject(const Name& source) {
- NameContainer container = PyObject_New(s_Name, &name_type);
+ NameContainer container(PyObject_New(s_Name, &name_type));
container.set(new Name(source));
return (container.release());
}
+
+bool
+PyName_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &name_type));
+}
+
+const Name&
+PyName_ToName(const PyObject* name_obj) {
+ if (name_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Name PyObject conversion");
+ }
+ const s_Name* name = static_cast<const s_Name*>(name_obj);
+ return (*name->cppobj);
+}
+
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/name_python.h b/src/lib/dns/python/name_python.h
index f8e793d..86d7fd0 100644
--- a/src/lib/dns/python/name_python.h
+++ b/src/lib/dns/python/name_python.h
@@ -17,20 +17,12 @@
#include <Python.h>
-#include <util/python/pycppwrapper_util.h>
-
namespace isc {
namespace dns {
-class NameComparisonResult;
class Name;
namespace python {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
extern PyObject* po_EmptyLabel;
extern PyObject* po_TooLongName;
extern PyObject* po_TooLongLabel;
@@ -47,25 +39,9 @@ extern PyObject* po_DNSMessageFORMERR;
//
extern PyObject* po_NameRelation;
-// The s_* Class simply covers one instantiation of the object.
-class s_NameComparisonResult : public PyObject {
-public:
- s_NameComparisonResult() : cppobj(NULL) {}
- NameComparisonResult* cppobj;
-};
-
-class s_Name : public PyObject {
-public:
- s_Name() : cppobj(NULL), position(0) {}
- Name* cppobj;
- size_t position;
-};
-
extern PyTypeObject name_comparison_result_type;
extern PyTypeObject name_type;
-bool initModulePart_Name(PyObject* mod);
-
/// This is A simple shortcut to create a python Name object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -74,6 +50,27 @@ bool initModulePart_Name(PyObject* mod);
/// This function is expected to be called with in a try block
/// followed by necessary setup for python exception.
PyObject* createNameObject(const Name& source);
+
+/// \brief Checks if the given python object is a Name object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Name, false otherwise
+bool PyName_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Name object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Name; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyName_Check()
+///
+/// \note This is not a copy; if the Name is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param name_obj The name object to convert
+const Name& PyName_ToName(const PyObject* name_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 0e2a30b..50436a9 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -12,32 +12,31 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/opcode.h>
-
-using namespace isc::dns;
+#include <Python.h>
-//
-// Declaration of the custom exceptions (None for this class)
+#include <dns/opcode.h>
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
+#include "pydnspp_common.h"
+#include "opcode_python.h"
+#include "edns_python.h"
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
namespace {
-//
-// Opcode
-//
+
class s_Opcode : public PyObject {
public:
- s_Opcode() : opcode(NULL), static_code(false) {}
- const Opcode* opcode;
+ s_Opcode() : cppobj(NULL), static_code(false) {}
+ const isc::dns::Opcode* cppobj;
bool static_code;
};
+typedef CPPPyObjectContainer<s_Opcode, Opcode> OpcodeContainer;
+
int Opcode_init(s_Opcode* const self, PyObject* args);
void Opcode_destroy(s_Opcode* const self);
@@ -103,64 +102,13 @@ PyMethodDef Opcode_methods[] = {
{ NULL, NULL, 0, NULL }
};
-PyTypeObject opcode_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Opcode",
- sizeof(s_Opcode), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Opcode_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Opcode_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Opcode class objects represent standard OPCODEs "
- "of the header section of DNS messages.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)Opcode_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Opcode_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Opcode_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
int
Opcode_init(s_Opcode* const self, PyObject* args) {
uint8_t code = 0;
if (PyArg_ParseTuple(args, "b", &code)) {
try {
- self->opcode = new Opcode(code);
+ self->cppobj = new Opcode(code);
self->static_code = false;
} catch (const isc::OutOfRange& ex) {
PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -181,22 +129,22 @@ Opcode_init(s_Opcode* const self, PyObject* args) {
void
Opcode_destroy(s_Opcode* const self) {
// Depending on whether we created the rcode or are referring
- // to a global static one, we do or do not delete self->opcode here
+ // to a global static one, we do or do not delete self->cppobj here
if (!self->static_code) {
- delete self->opcode;
+ delete self->cppobj;
}
- self->opcode = NULL;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
PyObject*
Opcode_getCode(const s_Opcode* const self) {
- return (Py_BuildValue("I", self->opcode->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
PyObject*
Opcode_toText(const s_Opcode* const self) {
- return (Py_BuildValue("s", self->opcode->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
PyObject*
@@ -211,7 +159,7 @@ PyObject*
Opcode_createStatic(const Opcode& opcode) {
s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
if (ret != NULL) {
- ret->opcode = &opcode;
+ ret->cppobj = &opcode;
ret->static_code = true;
}
return (ret);
@@ -297,7 +245,7 @@ Opcode_RESERVED15(const s_Opcode*) {
return (Opcode_createStatic(Opcode::RESERVED15()));
}
-PyObject*
+PyObject*
Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
const int op)
{
@@ -318,10 +266,10 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
return (NULL);
case Py_EQ:
- c = (*self->opcode == *other->opcode);
+ c = (*self->cppobj == *other->cppobj);
break;
case Py_NE:
- c = (*self->opcode != *other->opcode);
+ c = (*self->cppobj != *other->cppobj);
break;
case Py_GT:
PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
@@ -336,55 +284,88 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
Py_RETURN_FALSE;
}
-// Module Initialization, all statics are initialized here
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+PyTypeObject opcode_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Opcode",
+ sizeof(s_Opcode), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Opcode_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Opcode_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Opcode class objects represent standard OPCODEs "
+ "of the header section of DNS messages.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)Opcode_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Opcode_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Opcode_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createOpcodeObject(const Opcode& source) {
+ OpcodeContainer container(PyObject_New(s_Opcode, &opcode_type));
+ container.set(new Opcode(source));
+ return (container.release());
+}
+
bool
-initModulePart_Opcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&opcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&opcode_type);
- void* p = &opcode_type;
- if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&opcode_type);
- return (false);
+PyOpcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &opcode_type));
+}
- addClassVariable(opcode_type, "QUERY_CODE",
- Py_BuildValue("h", Opcode::QUERY_CODE));
- addClassVariable(opcode_type, "IQUERY_CODE",
- Py_BuildValue("h", Opcode::IQUERY_CODE));
- addClassVariable(opcode_type, "STATUS_CODE",
- Py_BuildValue("h", Opcode::STATUS_CODE));
- addClassVariable(opcode_type, "RESERVED3_CODE",
- Py_BuildValue("h", Opcode::RESERVED3_CODE));
- addClassVariable(opcode_type, "NOTIFY_CODE",
- Py_BuildValue("h", Opcode::NOTIFY_CODE));
- addClassVariable(opcode_type, "UPDATE_CODE",
- Py_BuildValue("h", Opcode::UPDATE_CODE));
- addClassVariable(opcode_type, "RESERVED6_CODE",
- Py_BuildValue("h", Opcode::RESERVED6_CODE));
- addClassVariable(opcode_type, "RESERVED7_CODE",
- Py_BuildValue("h", Opcode::RESERVED7_CODE));
- addClassVariable(opcode_type, "RESERVED8_CODE",
- Py_BuildValue("h", Opcode::RESERVED8_CODE));
- addClassVariable(opcode_type, "RESERVED9_CODE",
- Py_BuildValue("h", Opcode::RESERVED9_CODE));
- addClassVariable(opcode_type, "RESERVED10_CODE",
- Py_BuildValue("h", Opcode::RESERVED10_CODE));
- addClassVariable(opcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Opcode::RESERVED11_CODE));
- addClassVariable(opcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Opcode::RESERVED12_CODE));
- addClassVariable(opcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Opcode::RESERVED13_CODE));
- addClassVariable(opcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Opcode::RESERVED14_CODE));
- addClassVariable(opcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Opcode::RESERVED15_CODE));
-
- return (true);
+const Opcode&
+PyOpcode_ToOpcode(const PyObject* opcode_obj) {
+ if (opcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Opcode PyObject conversion");
+ }
+ const s_Opcode* opcode = static_cast<const s_Opcode*>(opcode_obj);
+ return (*opcode->cppobj);
}
-} // end of unnamed namespace
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/opcode_python.h b/src/lib/dns/python/opcode_python.h
new file mode 100644
index 0000000..d0aec15
--- /dev/null
+++ b/src/lib/dns/python/opcode_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_OPCODE_H
+#define __PYTHON_OPCODE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Opcode;
+
+namespace python {
+
+extern PyTypeObject opcode_type;
+
+/// This is a simple shortcut to create a python Opcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createOpcodeObject(const Opcode& source);
+
+/// \brief Checks if the given python object is a Opcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Opcode, false otherwise
+bool PyOpcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Opcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Opcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyOpcode_Check()
+///
+/// \note This is not a copy; if the Opcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param opcode_obj The opcode object to convert
+const Opcode& PyOpcode_ToOpcode(const PyObject* opcode_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_OPCODE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 07abf71..0a7d8e5 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -21,63 +21,707 @@
// name initModulePart_<name>, and return true/false instead of
// NULL/*mod
//
-// And of course care has to be taken that all identifiers be unique
+// The big init function is split up into a separate initModulePart function
+// for each class we add.
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <structmember.h>
-#include <config.h>
-
-#include <exceptions/exceptions.h>
-
-#include <util/buffer.h>
-
-#include <dns/exceptions.h>
-#include <dns/name.h>
-#include <dns/messagerenderer.h>
+#include <dns/message.h>
+#include <dns/opcode.h>
+#include <dns/tsig.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
+
+#include "edns_python.h"
+#include "message_python.h"
#include "messagerenderer_python.h"
#include "name_python.h"
+#include "opcode_python.h"
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "question_python.h"
#include "rcode_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrset_python.h"
+#include "rrttl_python.h"
+#include "rrtype_python.h"
+#include "tsigerror_python.h"
#include "tsigkey_python.h"
+#include "tsig_python.h"
#include "tsig_rdata_python.h"
-#include "tsigerror_python.h"
#include "tsigrecord_python.h"
-#include "tsig_python.h"
-namespace isc {
-namespace dns {
-namespace python {
-// For our 'general' isc::Exceptions
-PyObject* po_IscException;
-PyObject* po_InvalidParameter;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util::python;
+
+namespace {
+
+bool
+initModulePart_EDNS(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ //
+ // After the type has been initialized, we initialize any exceptions
+ // that are defined in the wrapper for this class, and add constants
+ // to the type, if any
+
+ if (PyType_Ready(&edns_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&edns_type);
+ void* p = &edns_type;
+ PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
+
+ addClassVariable(edns_type, "SUPPORTED_VERSION",
+ Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
-// For our own isc::dns::Exception
-PyObject* po_DNSMessageBADVERS;
+ return (true);
}
+
+bool
+initModulePart_Message(PyObject* mod) {
+ if (PyType_Ready(&message_type) < 0) {
+ return (false);
+ }
+ void* p = &message_type;
+ if (PyModule_AddObject(mod, "Message", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&message_type);
+
+ try {
+ //
+ // Constant class variables
+ //
+
+ // Parse mode
+ installClassVariable(message_type, "PARSE",
+ Py_BuildValue("I", Message::PARSE));
+ installClassVariable(message_type, "RENDER",
+ Py_BuildValue("I", Message::RENDER));
+
+ // Parse options
+ installClassVariable(message_type, "PARSE_DEFAULT",
+ Py_BuildValue("I", Message::PARSE_DEFAULT));
+ installClassVariable(message_type, "PRESERVE_ORDER",
+ Py_BuildValue("I", Message::PRESERVE_ORDER));
+
+ // Header flags
+ installClassVariable(message_type, "HEADERFLAG_QR",
+ Py_BuildValue("I", Message::HEADERFLAG_QR));
+ installClassVariable(message_type, "HEADERFLAG_AA",
+ Py_BuildValue("I", Message::HEADERFLAG_AA));
+ installClassVariable(message_type, "HEADERFLAG_TC",
+ Py_BuildValue("I", Message::HEADERFLAG_TC));
+ installClassVariable(message_type, "HEADERFLAG_RD",
+ Py_BuildValue("I", Message::HEADERFLAG_RD));
+ installClassVariable(message_type, "HEADERFLAG_RA",
+ Py_BuildValue("I", Message::HEADERFLAG_RA));
+ installClassVariable(message_type, "HEADERFLAG_AD",
+ Py_BuildValue("I", Message::HEADERFLAG_AD));
+ installClassVariable(message_type, "HEADERFLAG_CD",
+ Py_BuildValue("I", Message::HEADERFLAG_CD));
+
+ // Sections
+ installClassVariable(message_type, "SECTION_QUESTION",
+ Py_BuildValue("I", Message::SECTION_QUESTION));
+ installClassVariable(message_type, "SECTION_ANSWER",
+ Py_BuildValue("I", Message::SECTION_ANSWER));
+ installClassVariable(message_type, "SECTION_AUTHORITY",
+ Py_BuildValue("I", Message::SECTION_AUTHORITY));
+ installClassVariable(message_type, "SECTION_ADDITIONAL",
+ Py_BuildValue("I", Message::SECTION_ADDITIONAL));
+
+ // Protocol constant
+ installClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
+ Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
+
+ /* Class-specific exceptions */
+ po_MessageTooShort =
+ PyErr_NewException("pydnspp.MessageTooShort", NULL, NULL);
+ PyObjectContainer(po_MessageTooShort).installToModule(
+ mod, "MessageTooShort");
+ po_InvalidMessageSection =
+ PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageSection).installToModule(
+ mod, "InvalidMessageSection");
+ po_InvalidMessageOperation =
+ PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageOperation).installToModule(
+ mod, "InvalidMessageOperation");
+ po_InvalidMessageUDPSize =
+ PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageUDPSize).installToModule(
+ mod, "InvalidMessageUDPSize");
+ po_DNSMessageBADVERS =
+ PyErr_NewException("pydnspp.DNSMessageBADVERS", NULL, NULL);
+ PyObjectContainer(po_DNSMessageBADVERS).installToModule(
+ mod, "DNSMessageBADVERS");
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Message initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message initialization");
+ return (false);
+ }
+
+ return (true);
}
+
+bool
+initModulePart_MessageRenderer(PyObject* mod) {
+ if (PyType_Ready(&messagerenderer_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&messagerenderer_type);
+
+ addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
+ addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
+ Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
+
+ PyModule_AddObject(mod, "MessageRenderer",
+ reinterpret_cast<PyObject*>(&messagerenderer_type));
+
+ return (true);
}
-// order is important here!
-using namespace isc::dns::python;
+bool
+initModulePart_Name(PyObject* mod) {
-#include <dns/python/rrclass_python.cc> // needs Messagerenderer
-#include <dns/python/rrtype_python.cc> // needs Messagerenderer
-#include <dns/python/rrttl_python.cc> // needs Messagerenderer
-#include <dns/python/rdata_python.cc> // needs Type, Class
-#include <dns/python/rrset_python.cc> // needs Rdata, RRTTL
-#include <dns/python/question_python.cc> // needs RRClass, RRType, RRTTL,
- // Name
-#include <dns/python/opcode_python.cc>
-#include <dns/python/edns_python.cc> // needs Messagerenderer, Rcode
-#include <dns/python/message_python.cc> // needs RRset, Question
+ //
+ // NameComparisonResult
+ //
+ if (PyType_Ready(&name_comparison_result_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_comparison_result_type);
+
+ // Add the enums to the module
+ po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
+ NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
+ NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
+ NameComparisonResult::EQUAL, "EQUAL",
+ NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
+ addClassVariable(name_comparison_result_type, "NameRelation",
+ po_NameRelation);
+
+ PyModule_AddObject(mod, "NameComparisonResult",
+ reinterpret_cast<PyObject*>(&name_comparison_result_type));
+
+ //
+ // Name
+ //
+
+ if (PyType_Ready(&name_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&name_type);
+
+ // Add the constants to the module
+ addClassVariable(name_type, "MAX_WIRE",
+ Py_BuildValue("I", Name::MAX_WIRE));
+ addClassVariable(name_type, "MAX_LABELS",
+ Py_BuildValue("I", Name::MAX_LABELS));
+ addClassVariable(name_type, "MAX_LABELLEN",
+ Py_BuildValue("I", Name::MAX_LABELLEN));
+ addClassVariable(name_type, "MAX_COMPRESS_POINTER",
+ Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK8",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
+ addClassVariable(name_type, "COMPRESS_POINTER_MARK16",
+ Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
+
+ addClassVariable(name_type, "ROOT_NAME",
+ createNameObject(Name::ROOT_NAME()));
+
+ PyModule_AddObject(mod, "Name",
+ reinterpret_cast<PyObject*>(&name_type));
+
+
+ // Add the exceptions to the module
+ po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
+
+ po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongName", po_TooLongName);
+
+ po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
+ PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
+
+ po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
+ PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
+
+ po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
+ PyModule_AddObject(mod, "BadEscape", po_BadEscape);
+
+ po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
+
+ po_InvalidBufferPosition =
+ PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
+
+ // This one could have gone into the message_python.cc file, but is
+ // already needed here.
+ po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR",
+ NULL, NULL);
+ PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
+
+ return (true);
+}
+
+bool
+initModulePart_Opcode(PyObject* mod) {
+ if (PyType_Ready(&opcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&opcode_type);
+ void* p = &opcode_type;
+ if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&opcode_type);
+ return (false);
+ }
+
+ addClassVariable(opcode_type, "QUERY_CODE",
+ Py_BuildValue("h", Opcode::QUERY_CODE));
+ addClassVariable(opcode_type, "IQUERY_CODE",
+ Py_BuildValue("h", Opcode::IQUERY_CODE));
+ addClassVariable(opcode_type, "STATUS_CODE",
+ Py_BuildValue("h", Opcode::STATUS_CODE));
+ addClassVariable(opcode_type, "RESERVED3_CODE",
+ Py_BuildValue("h", Opcode::RESERVED3_CODE));
+ addClassVariable(opcode_type, "NOTIFY_CODE",
+ Py_BuildValue("h", Opcode::NOTIFY_CODE));
+ addClassVariable(opcode_type, "UPDATE_CODE",
+ Py_BuildValue("h", Opcode::UPDATE_CODE));
+ addClassVariable(opcode_type, "RESERVED6_CODE",
+ Py_BuildValue("h", Opcode::RESERVED6_CODE));
+ addClassVariable(opcode_type, "RESERVED7_CODE",
+ Py_BuildValue("h", Opcode::RESERVED7_CODE));
+ addClassVariable(opcode_type, "RESERVED8_CODE",
+ Py_BuildValue("h", Opcode::RESERVED8_CODE));
+ addClassVariable(opcode_type, "RESERVED9_CODE",
+ Py_BuildValue("h", Opcode::RESERVED9_CODE));
+ addClassVariable(opcode_type, "RESERVED10_CODE",
+ Py_BuildValue("h", Opcode::RESERVED10_CODE));
+ addClassVariable(opcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Opcode::RESERVED11_CODE));
+ addClassVariable(opcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Opcode::RESERVED12_CODE));
+ addClassVariable(opcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Opcode::RESERVED13_CODE));
+ addClassVariable(opcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Opcode::RESERVED14_CODE));
+ addClassVariable(opcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Question(PyObject* mod) {
+ if (PyType_Ready(&question_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&question_type);
+ PyModule_AddObject(mod, "Question",
+ reinterpret_cast<PyObject*>(&question_type));
+
+ return (true);
+}
+
+bool
+initModulePart_Rcode(PyObject* mod) {
+ if (PyType_Ready(&rcode_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rcode_type);
+ void* p = &rcode_type;
+ if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&rcode_type);
+ return (false);
+ }
+
+ addClassVariable(rcode_type, "NOERROR_CODE",
+ Py_BuildValue("h", Rcode::NOERROR_CODE));
+ addClassVariable(rcode_type, "FORMERR_CODE",
+ Py_BuildValue("h", Rcode::FORMERR_CODE));
+ addClassVariable(rcode_type, "SERVFAIL_CODE",
+ Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+ addClassVariable(rcode_type, "NXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+ addClassVariable(rcode_type, "NOTIMP_CODE",
+ Py_BuildValue("h", Rcode::NOTIMP_CODE));
+ addClassVariable(rcode_type, "REFUSED_CODE",
+ Py_BuildValue("h", Rcode::REFUSED_CODE));
+ addClassVariable(rcode_type, "YXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+ addClassVariable(rcode_type, "YXRRSET_CODE",
+ Py_BuildValue("h", Rcode::YXRRSET_CODE));
+ addClassVariable(rcode_type, "NXRRSET_CODE",
+ Py_BuildValue("h", Rcode::NXRRSET_CODE));
+ addClassVariable(rcode_type, "NOTAUTH_CODE",
+ Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+ addClassVariable(rcode_type, "NOTZONE_CODE",
+ Py_BuildValue("h", Rcode::NOTZONE_CODE));
+ addClassVariable(rcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Rcode::RESERVED11_CODE));
+ addClassVariable(rcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Rcode::RESERVED12_CODE));
+ addClassVariable(rcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Rcode::RESERVED13_CODE));
+ addClassVariable(rcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Rcode::RESERVED14_CODE));
+ addClassVariable(rcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Rcode::RESERVED15_CODE));
+ addClassVariable(rcode_type, "BADVERS_CODE",
+ Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+ return (true);
+}
+
+bool
+initModulePart_Rdata(PyObject* mod) {
+ if (PyType_Ready(&rdata_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rdata_type);
+ PyModule_AddObject(mod, "Rdata",
+ reinterpret_cast<PyObject*>(&rdata_type));
+
+ // Add the exceptions to the class
+ po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
+
+ po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText",
+ NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+
+ po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong",
+ NULL, NULL);
+ PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+
+
+ return (true);
+}
+
+bool
+initModulePart_RRClass(PyObject* mod) {
+ po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass",
+ NULL, NULL);
+ Py_INCREF(po_InvalidRRClass);
+ PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
+ po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass",
+ NULL, NULL);
+ Py_INCREF(po_IncompleteRRClass);
+ PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
+
+ if (PyType_Ready(&rrclass_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrclass_type);
+ PyModule_AddObject(mod, "RRClass",
+ reinterpret_cast<PyObject*>(&rrclass_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRset(PyObject* mod) {
+ po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
+ PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+
+ // NameComparisonResult
+ if (PyType_Ready(&rrset_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrset_type);
+ PyModule_AddObject(mod, "RRset",
+ reinterpret_cast<PyObject*>(&rrset_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRTTL(PyObject* mod) {
+ po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
+ po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+
+ if (PyType_Ready(&rrttl_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrttl_type);
+ PyModule_AddObject(mod, "RRTTL",
+ reinterpret_cast<PyObject*>(&rrttl_type));
+
+ return (true);
+}
+
+bool
+initModulePart_RRType(PyObject* mod) {
+ // Add the exceptions to the module
+ po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
+ PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
+ po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType",
+ NULL, NULL);
+ PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
+
+ if (PyType_Ready(&rrtype_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&rrtype_type);
+ PyModule_AddObject(mod, "RRType",
+ reinterpret_cast<PyObject*>(&rrtype_type));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGError(PyObject* mod) {
+ if (PyType_Ready(&tsigerror_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigerror_type;
+ if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigerror_type);
+
+ try {
+ // Constant class variables
+ // Error codes (bare values)
+ installClassVariable(tsigerror_type, "BAD_SIG_CODE",
+ Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
+ installClassVariable(tsigerror_type, "BAD_KEY_CODE",
+ Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
+ installClassVariable(tsigerror_type, "BAD_TIME_CODE",
+ Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
+
+ // Error codes (constant objects)
+ installClassVariable(tsigerror_type, "NOERROR",
+ createTSIGErrorObject(TSIGError::NOERROR()));
+ installClassVariable(tsigerror_type, "FORMERR",
+ createTSIGErrorObject(TSIGError::FORMERR()));
+ installClassVariable(tsigerror_type, "SERVFAIL",
+ createTSIGErrorObject(TSIGError::SERVFAIL()));
+ installClassVariable(tsigerror_type, "NXDOMAIN",
+ createTSIGErrorObject(TSIGError::NXDOMAIN()));
+ installClassVariable(tsigerror_type, "NOTIMP",
+ createTSIGErrorObject(TSIGError::NOTIMP()));
+ installClassVariable(tsigerror_type, "REFUSED",
+ createTSIGErrorObject(TSIGError::REFUSED()));
+ installClassVariable(tsigerror_type, "YXDOMAIN",
+ createTSIGErrorObject(TSIGError::YXDOMAIN()));
+ installClassVariable(tsigerror_type, "YXRRSET",
+ createTSIGErrorObject(TSIGError::YXRRSET()));
+ installClassVariable(tsigerror_type, "NXRRSET",
+ createTSIGErrorObject(TSIGError::NXRRSET()));
+ installClassVariable(tsigerror_type, "NOTAUTH",
+ createTSIGErrorObject(TSIGError::NOTAUTH()));
+ installClassVariable(tsigerror_type, "NOTZONE",
+ createTSIGErrorObject(TSIGError::NOTZONE()));
+ installClassVariable(tsigerror_type, "RESERVED11",
+ createTSIGErrorObject(TSIGError::RESERVED11()));
+ installClassVariable(tsigerror_type, "RESERVED12",
+ createTSIGErrorObject(TSIGError::RESERVED12()));
+ installClassVariable(tsigerror_type, "RESERVED13",
+ createTSIGErrorObject(TSIGError::RESERVED13()));
+ installClassVariable(tsigerror_type, "RESERVED14",
+ createTSIGErrorObject(TSIGError::RESERVED14()));
+ installClassVariable(tsigerror_type, "RESERVED15",
+ createTSIGErrorObject(TSIGError::RESERVED15()));
+ installClassVariable(tsigerror_type, "BAD_SIG",
+ createTSIGErrorObject(TSIGError::BAD_SIG()));
+ installClassVariable(tsigerror_type, "BAD_KEY",
+ createTSIGErrorObject(TSIGError::BAD_KEY()));
+ installClassVariable(tsigerror_type, "BAD_TIME",
+ createTSIGErrorObject(TSIGError::BAD_TIME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGError initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGError initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKey(PyObject* mod) {
+ if (PyType_Ready(&tsigkey_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigkey_type;
+ if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkey_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigkey_type, "HMACMD5_NAME",
+ createNameObject(TSIGKey::HMACMD5_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA1_NAME",
+ createNameObject(TSIGKey::HMACSHA1_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA256_NAME",
+ createNameObject(TSIGKey::HMACSHA256_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA224_NAME",
+ createNameObject(TSIGKey::HMACSHA224_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA384_NAME",
+ createNameObject(TSIGKey::HMACSHA384_NAME()));
+ installClassVariable(tsigkey_type, "HMACSHA512_NAME",
+ createNameObject(TSIGKey::HMACSHA512_NAME()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGKey initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGKey initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGKeyRing(PyObject* mod) {
+ if (PyType_Ready(&tsigkeyring_type) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigkeyring_type);
+ void* p = &tsigkeyring_type;
+ if (PyModule_AddObject(mod, "TSIGKeyRing",
+ static_cast<PyObject*>(p)) != 0) {
+ Py_DECREF(&tsigkeyring_type);
+ return (false);
+ }
+
+ addClassVariable(tsigkeyring_type, "SUCCESS",
+ Py_BuildValue("I", TSIGKeyRing::SUCCESS));
+ addClassVariable(tsigkeyring_type, "EXIST",
+ Py_BuildValue("I", TSIGKeyRing::EXIST));
+ addClassVariable(tsigkeyring_type, "NOTFOUND",
+ Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGContext(PyObject* mod) {
+ if (PyType_Ready(&tsigcontext_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigcontext_type;
+ if (PyModule_AddObject(mod, "TSIGContext",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigcontext_type);
+
+ try {
+ // Class specific exceptions
+ po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
+ po_IscException, NULL);
+ PyObjectContainer(po_TSIGContextError).installToModule(
+ mod, "TSIGContextError");
+
+ // Constant class variables
+ installClassVariable(tsigcontext_type, "STATE_INIT",
+ Py_BuildValue("I", TSIGContext::INIT));
+ installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
+ Py_BuildValue("I", TSIGContext::SENT_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
+ Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
+ installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
+ Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
+ installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
+ Py_BuildValue("I",
+ TSIGContext::VERIFIED_RESPONSE));
+
+ installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
+ Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGContext initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGContext initialization");
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+initModulePart_TSIG(PyObject* mod) {
+ if (PyType_Ready(&tsig_type) < 0) {
+ return (false);
+ }
+ void* p = &tsig_type;
+ if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsig_type);
+
+ return (true);
+}
+
+bool
+initModulePart_TSIGRecord(PyObject* mod) {
+ if (PyType_Ready(&tsigrecord_type) < 0) {
+ return (false);
+ }
+ void* p = &tsigrecord_type;
+ if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&tsigrecord_type);
+
+ try {
+ // Constant class variables
+ installClassVariable(tsigrecord_type, "TSIG_TTL",
+ Py_BuildValue("I", 0));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in TSIGRecord initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in TSIGRecord initialization");
+ return (false);
+ }
+
+ return (true);
+}
-//
-// Definition of the module
-//
-namespace {
PyModuleDef pydnspp = {
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
"pydnspp",
diff --git a/src/lib/dns/python/pydnspp_common.cc b/src/lib/dns/python/pydnspp_common.cc
index 8ca763a..0f0f873 100644
--- a/src/lib/dns/python/pydnspp_common.cc
+++ b/src/lib/dns/python/pydnspp_common.cc
@@ -15,9 +15,45 @@
#include <Python.h>
#include <pydnspp_common.h>
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+
+#include <dns/exceptions.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rrset_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "tsigkey_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigerror_python.h"
+#include "tsigrecord_python.h"
+#include "tsig_python.h"
+#include "question_python.h"
+#include "message_python.h"
+
+using namespace isc::dns::python;
+
namespace isc {
namespace dns {
namespace python {
+// For our 'general' isc::Exceptions
+PyObject* po_IscException;
+PyObject* po_InvalidParameter;
+
+// For our own isc::dns::Exception
+PyObject* po_DNSMessageBADVERS;
+
+
int
readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence) {
PyObject* el = NULL;
diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h
index ed90998..8092b08 100644
--- a/src/lib/dns/python/pydnspp_common.h
+++ b/src/lib/dns/python/pydnspp_common.h
@@ -20,8 +20,6 @@
#include <stdexcept>
#include <string>
-#include <util/python/pycppwrapper_util.h>
-
namespace isc {
namespace dns {
namespace python {
diff --git a/src/lib/dns/python/pydnspp_towire.h b/src/lib/dns/python/pydnspp_towire.h
index 66362a0..e987a29 100644
--- a/src/lib/dns/python/pydnspp_towire.h
+++ b/src/lib/dns/python/pydnspp_towire.h
@@ -93,10 +93,10 @@ toWireWrapper(const PYSTRUCT* const self, PyObject* args) {
}
// To MessageRenderer version
- s_MessageRenderer* renderer;
+ PyObject* renderer;
if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &renderer)) {
const unsigned int n = TOWIRECALLER(*self->cppobj)(
- *renderer->messagerenderer);
+ PyMessageRenderer_ToMessageRenderer(renderer));
return (Py_BuildValue("I", n));
}
diff --git a/src/lib/dns/python/question_python.cc b/src/lib/dns/python/question_python.cc
index c702f85..44d68a2 100644
--- a/src/lib/dns/python/question_python.cc
+++ b/src/lib/dns/python/question_python.cc
@@ -12,25 +12,34 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/question.h>
+#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+#include "question_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+using namespace isc;
-//
-// Question
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
class s_Question : public PyObject {
public:
- QuestionPtr question;
+ isc::dns::QuestionPtr cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
static int Question_init(s_Question* self, PyObject* args);
static void Question_destroy(s_Question* self);
@@ -69,60 +78,6 @@ static PyMethodDef Question_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Question
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject question_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Question",
- sizeof(s_Question), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Question_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Question_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Question class encapsulates the common search key of DNS"
- "lookup, consisting of owner name, RR type and RR class.",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Question_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Question_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
static int
Question_init(s_Question* self, PyObject* args) {
// Try out the various combinations of arguments to call the
@@ -131,9 +86,9 @@ Question_init(s_Question* self, PyObject* args) {
// that if we try several like here. Otherwise the *next* python
// call will suddenly appear to throw an exception.
// (the way to do exceptions is to set PyErr and return -1)
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
const char* b;
Py_ssize_t len;
@@ -141,17 +96,18 @@ Question_init(s_Question* self, PyObject* args) {
try {
if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &name,
- &rrclass_type, &rrclass,
- &rrtype_type, &rrtype
+ &rrclass_type, &rrclass,
+ &rrtype_type, &rrtype
)) {
- self->question = QuestionPtr(new Question(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype));
+ self->cppobj = QuestionPtr(new Question(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype)));
return (0);
} else if (PyArg_ParseTuple(args, "y#|I", &b, &len, &position)) {
PyErr_Clear();
InputBuffer inbuf(b, len);
inbuf.setPosition(position);
- self->question = QuestionPtr(new Question(inbuf));
+ self->cppobj = QuestionPtr(new Question(inbuf));
return (0);
}
} catch (const DNSMessageFORMERR& dmfe) {
@@ -168,7 +124,7 @@ Question_init(s_Question* self, PyObject* args) {
return (-1);
}
- self->question = QuestionPtr();
+ self->cppobj = QuestionPtr();
PyErr_Clear();
PyErr_SetString(PyExc_TypeError,
@@ -178,52 +134,62 @@ Question_init(s_Question* self, PyObject* args) {
static void
Question_destroy(s_Question* self) {
- self->question.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
static PyObject*
Question_getName(s_Question* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->cppobj = new Name(self->question->getName());
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question Name");
}
-
- return (name);
+ return (NULL);
}
static PyObject*
Question_getType(s_Question* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->question->getType());
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
static PyObject*
Question_getClass(s_Question* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->question->getClass());
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-
static PyObject*
Question_toText(s_Question* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->question->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
static PyObject*
@@ -237,14 +203,14 @@ Question_str(PyObject* self) {
static PyObject*
Question_toWire(s_Question* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
// Max length is Name::MAX_WIRE + rrclass (2) + rrtype (2)
OutputBuffer buffer(Name::MAX_WIRE + 4);
- self->question->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -253,7 +219,7 @@ Question_toWire(s_Question* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->question->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -264,23 +230,92 @@ Question_toWire(s_Question* self, PyObject* args) {
return (NULL);
}
-// end of Question
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Question
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject question_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Question",
+ sizeof(s_Question), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Question_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Question_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Question class encapsulates the common search key of DNS"
+ "lookup, consisting of owner name, RR type and RR class.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Question_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Question_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createQuestionObject(const Question& source) {
+ s_Question* question =
+ static_cast<s_Question*>(question_type.tp_alloc(&question_type, 0));
+ question->cppobj = QuestionPtr(new Question(source));
+ return (question);
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_Question(PyObject* mod) {
- // Add the exceptions to the module
+PyQuestion_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &question_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&question_type) < 0) {
- return (false);
+const Question&
+PyQuestion_ToQuestion(const PyObject* question_obj) {
+ if (question_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Question PyObject conversion");
}
- Py_INCREF(&question_type);
- PyModule_AddObject(mod, "Question",
- reinterpret_cast<PyObject*>(&question_type));
-
- return (true);
+ const s_Question* question = static_cast<const s_Question*>(question_obj);
+ return (*question->cppobj);
}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/question_python.h b/src/lib/dns/python/question_python.h
new file mode 100644
index 0000000..f5d78b1
--- /dev/null
+++ b/src/lib/dns/python/question_python.h
@@ -0,0 +1,66 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_QUESTION_H
+#define __PYTHON_QUESTION_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Question;
+
+namespace python {
+
+extern PyObject* po_EmptyQuestion;
+
+extern PyTypeObject question_type;
+
+/// This is a simple shortcut to create a python Question object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createQuestionObject(const Question& source);
+
+/// \brief Checks if the given python object is a Question object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Question, false otherwise
+bool PyQuestion_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Question object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Question; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyQuestion_Check()
+///
+/// \note This is not a copy; if the Question is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param question_obj The question object to convert
+const Question& PyQuestion_ToQuestion(const PyObject* question_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_QUESTION_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index b594ad3..42b48e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -15,34 +15,39 @@
#include <Python.h>
#include <exceptions/exceptions.h>
-
#include <dns/rcode.h>
+#include <util/python/pycppwrapper_util.h>
#include "pydnspp_common.h"
#include "rcode_python.h"
using namespace isc::dns;
using namespace isc::dns::python;
+using namespace isc::util::python;
+namespace {
+// The s_* Class simply covers one instantiation of the object.
//
-// Declaration of the custom exceptions (None for this class)
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rcode
+// We added a helper variable static_code here
+// Since we can create Rcodes dynamically with Rcode(int), but also
+// use the static globals (Rcode::NOERROR() etc), we use this
+// variable to see if the code came from one of the latter, in which
+// case Rcode_destroy should not free it (the other option is to
+// allocate new Rcodes for every use of the static ones, but this
+// seems more efficient).
//
+// Follow-up note: we don't have to use the proxy function in the python lib;
+// we can just define class specific constants directly (see TSIGError).
+// We should make this cleanup later.
+class s_Rcode : public PyObject {
+public:
+ s_Rcode() : cppobj(NULL), static_code(false) {};
+ const Rcode* cppobj;
+ bool static_code;
+};
-// Trivial constructor.
-s_Rcode::s_Rcode() : cppobj(NULL), static_code(false) {}
+typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodeContainer;
-namespace {
int Rcode_init(s_Rcode* const self, PyObject* args);
void Rcode_destroy(s_Rcode* const self);
@@ -282,7 +287,7 @@ Rcode_BADVERS(const s_Rcode*) {
return (Rcode_createStatic(Rcode::BADVERS()));
}
-PyObject*
+PyObject*
Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
const int op)
{
@@ -376,59 +381,31 @@ PyTypeObject rcode_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
+PyObject*
+createRcodeObject(const Rcode& source) {
+ RcodeContainer container(PyObject_New(s_Rcode, &rcode_type));
+ container.set(new Rcode(source));
+ return (container.release());
+}
+
bool
-initModulePart_Rcode(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rcode_type) < 0) {
- return (false);
- }
- Py_INCREF(&rcode_type);
- void* p = &rcode_type;
- if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&rcode_type);
- return (false);
+PyRcode_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &rcode_type));
+}
- addClassVariable(rcode_type, "NOERROR_CODE",
- Py_BuildValue("h", Rcode::NOERROR_CODE));
- addClassVariable(rcode_type, "FORMERR_CODE",
- Py_BuildValue("h", Rcode::FORMERR_CODE));
- addClassVariable(rcode_type, "SERVFAIL_CODE",
- Py_BuildValue("h", Rcode::SERVFAIL_CODE));
- addClassVariable(rcode_type, "NXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
- addClassVariable(rcode_type, "NOTIMP_CODE",
- Py_BuildValue("h", Rcode::NOTIMP_CODE));
- addClassVariable(rcode_type, "REFUSED_CODE",
- Py_BuildValue("h", Rcode::REFUSED_CODE));
- addClassVariable(rcode_type, "YXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
- addClassVariable(rcode_type, "YXRRSET_CODE",
- Py_BuildValue("h", Rcode::YXRRSET_CODE));
- addClassVariable(rcode_type, "NXRRSET_CODE",
- Py_BuildValue("h", Rcode::NXRRSET_CODE));
- addClassVariable(rcode_type, "NOTAUTH_CODE",
- Py_BuildValue("h", Rcode::NOTAUTH_CODE));
- addClassVariable(rcode_type, "NOTZONE_CODE",
- Py_BuildValue("h", Rcode::NOTZONE_CODE));
- addClassVariable(rcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Rcode::RESERVED11_CODE));
- addClassVariable(rcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Rcode::RESERVED12_CODE));
- addClassVariable(rcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Rcode::RESERVED13_CODE));
- addClassVariable(rcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Rcode::RESERVED14_CODE));
- addClassVariable(rcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Rcode::RESERVED15_CODE));
- addClassVariable(rcode_type, "BADVERS_CODE",
- Py_BuildValue("h", Rcode::BADVERS_CODE));
-
- return (true);
+const Rcode&
+PyRcode_ToRcode(const PyObject* rcode_obj) {
+ if (rcode_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rcode PyObject conversion");
+ }
+ const s_Rcode* rcode = static_cast<const s_Rcode*>(rcode_obj);
+ return (*rcode->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/rcode_python.h b/src/lib/dns/python/rcode_python.h
index 9b5e699..a149406 100644
--- a/src/lib/dns/python/rcode_python.h
+++ b/src/lib/dns/python/rcode_python.h
@@ -23,29 +23,36 @@ class Rcode;
namespace python {
-// The s_* Class simply covers one instantiation of the object.
-//
-// We added a helper variable static_code here
-// Since we can create Rcodes dynamically with Rcode(int), but also
-// use the static globals (Rcode::NOERROR() etc), we use this
-// variable to see if the code came from one of the latter, in which
-// case Rcode_destroy should not free it (the other option is to
-// allocate new Rcodes for every use of the static ones, but this
-// seems more efficient).
-//
-// Follow-up note: we don't have to use the proxy function in the python lib;
-// we can just define class specific constants directly (see TSIGError).
-// We should make this cleanup later.
-class s_Rcode : public PyObject {
-public:
- s_Rcode();
- const Rcode* cppobj;
- bool static_code;
-};
-
extern PyTypeObject rcode_type;
-bool initModulePart_Rcode(PyObject* mod);
+/// This is a simple shortcut to create a python Rcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRcodeObject(const Rcode& source);
+
+/// \brief Checks if the given python object is a Rcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rcode, false otherwise
+bool PyRcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rcode object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rcode; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRcode_Check()
+///
+/// \note This is not a copy; if the Rcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rcode_obj The rcode object to convert
+const Rcode& PyRcode_ToRcode(const PyObject* rcode_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index faa4f4c..06c0263 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -12,60 +12,48 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include <dns/rdata.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rdata_python.h"
+#include "rrtype_python.h"
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
using namespace isc::dns::rdata;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRdataLength;
-static PyObject* po_InvalidRdataText;
-static PyObject* po_CharStringTooLong;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rdata
-//
-
-// The s_* Class simply coverst one instantiation of the object
-
-// Using a shared_ptr here should not really be necessary (PyObject
-// is already reference-counted), however internally on the cpp side,
-// not doing so might result in problems, since we can't copy construct
-// rdata field, adding them to rrsets results in a problem when the
-// rrset is destroyed later
+namespace {
class s_Rdata : public PyObject {
public:
- RdataPtr rdata;
+ isc::dns::rdata::ConstRdataPtr cppobj;
};
+typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
+
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
//
// General creation and destruction
-static int Rdata_init(s_Rdata* self, PyObject* args);
-static void Rdata_destroy(s_Rdata* self);
+int Rdata_init(s_Rdata* self, PyObject* args);
+void Rdata_destroy(s_Rdata* self);
// These are the functions we export
-static PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(s_Rdata* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* Rdata_str(PyObject* self);
-static PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_str(PyObject* self);
+PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
+PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -73,7 +61,7 @@ static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef Rdata_methods[] = {
+PyMethodDef Rdata_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
@@ -86,64 +74,10 @@ static PyMethodDef Rdata_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Rdata
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rdata_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.Rdata",
- sizeof(s_Rdata), // tp_basicsize
- 0, // tp_itemsize
- (destructor)Rdata_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- Rdata_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The Rdata class is an abstract base class that provides "
- "a set of common interfaces to manipulate concrete RDATA objects.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RData_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- Rdata_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)Rdata_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
Rdata_init(s_Rdata* self, PyObject* args) {
- s_RRType* rrtype;
- s_RRClass* rrclass;
+ PyObject* rrtype;
+ PyObject* rrclass;
const char* s;
const char* data;
Py_ssize_t len;
@@ -152,34 +86,36 @@ Rdata_init(s_Rdata* self, PyObject* args) {
if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
&rrclass_type, &rrclass,
&s)) {
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass, s);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass), s);
return (0);
} else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
&rrclass_type, &rrclass, &data, &len)) {
InputBuffer input_buffer(data, len);
- self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass,
- input_buffer, len);
+ self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+ PyRRClass_ToRRClass(rrclass),
+ input_buffer, len);
return (0);
}
return (-1);
}
-static void
+void
Rdata_destroy(s_Rdata* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rdata.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
Rdata_toText(s_Rdata* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rdata->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
Rdata_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -187,16 +123,16 @@ Rdata_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
Rdata_toWire(s_Rdata* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rdata->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
// We need to release the object we temporarily created here
@@ -204,7 +140,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
Py_DECREF(rd_bytes);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rdata->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -215,9 +151,7 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
return (NULL);
}
-
-
-static PyObject*
+PyObject*
RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
bool c;
@@ -229,24 +163,24 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
switch (op) {
case Py_LT:
- c = self->rdata->compare(*other->rdata) < 0;
+ c = self->cppobj->compare(*other->cppobj) < 0;
break;
case Py_LE:
- c = self->rdata->compare(*other->rdata) < 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) < 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_EQ:
- c = self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) == 0;
break;
case Py_NE:
- c = self->rdata->compare(*other->rdata) != 0;
+ c = self->cppobj->compare(*other->cppobj) != 0;
break;
case Py_GT:
- c = self->rdata->compare(*other->rdata) > 0;
+ c = self->cppobj->compare(*other->cppobj) > 0;
break;
case Py_GE:
- c = self->rdata->compare(*other->rdata) > 0 ||
- self->rdata->compare(*other->rdata) == 0;
+ c = self->cppobj->compare(*other->cppobj) > 0 ||
+ self->cppobj->compare(*other->cppobj) == 0;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -258,32 +192,107 @@ RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of Rdata
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Rdata(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rdata_type) < 0) {
- return (false);
- }
- Py_INCREF(&rdata_type);
- PyModule_AddObject(mod, "Rdata",
- reinterpret_cast<PyObject*>(&rdata_type));
+namespace isc {
+namespace dns {
+namespace python {
- // Add the exceptions to the class
- po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
- po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp
+//
+PyObject* po_InvalidRdataLength;
+PyObject* po_InvalidRdataText;
+PyObject* po_CharStringTooLong;
- po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong", NULL, NULL);
- PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Rdata
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rdata_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.Rdata",
+ sizeof(s_Rdata), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)Rdata_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ Rdata_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The Rdata class is an abstract base class that provides "
+ "a set of common interfaces to manipulate concrete RDATA objects.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RData_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ Rdata_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)Rdata_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
-
- return (true);
+PyObject*
+createRdataObject(ConstRdataPtr source) {
+ s_Rdata* py_rdata =
+ static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
+ if (py_rdata == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
+ }
+ py_rdata->cppobj = source;
+ return (py_rdata);
}
+
+bool
+PyRdata_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rdata_type));
+}
+
+const Rdata&
+PyRdata_ToRdata(const PyObject* rdata_obj) {
+ if (rdata_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in Rdata PyObject conversion");
+ }
+ const s_Rdata* rdata = static_cast<const s_Rdata*>(rdata_obj);
+ return (*rdata->cppobj);
+}
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rdata_python.h b/src/lib/dns/python/rdata_python.h
new file mode 100644
index 0000000..c7ddd57
--- /dev/null
+++ b/src/lib/dns/python/rdata_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RDATA_H
+#define __PYTHON_RDATA_H 1
+
+#include <Python.h>
+
+#include <dns/rdata.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_InvalidRdataLength;
+extern PyObject* po_InvalidRdataText;
+extern PyObject* po_CharStringTooLong;
+
+extern PyTypeObject rdata_type;
+
+/// This is a simple shortcut to create a python Rdata object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRdataObject(isc::dns::rdata::ConstRdataPtr source);
+
+/// \brief Checks if the given python object is a Rdata object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rdata, false otherwise
+bool PyRdata_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rdata object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type Rdata; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRdata_Check()
+///
+/// \note This is not a copy; if the Rdata is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rdata_obj The rdata object to convert
+const isc::dns::rdata::Rdata& PyRdata_ToRdata(const PyObject* rdata_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RDATA_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index 6d150c2..0014187 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -11,35 +11,28 @@
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <dns/rrclass.h>
-using namespace isc::dns;
-using namespace isc::util;
-
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRClass;
-static PyObject* po_IncompleteRRClass;
-
-//
-// Definition of the classes
-//
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
-//
-// RRClass
-//
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRClass : public PyObject {
public:
- RRClass* rrclass;
+ s_RRClass() : cppobj(NULL) {};
+ RRClass* cppobj;
};
//
@@ -48,25 +41,26 @@ public:
//
// General creation and destruction
-static int RRClass_init(s_RRClass* self, PyObject* args);
-static void RRClass_destroy(s_RRClass* self);
+int RRClass_init(s_RRClass* self, PyObject* args);
+void RRClass_destroy(s_RRClass* self);
// These are the functions we export
-static PyObject* RRClass_toText(s_RRClass* self);
+PyObject* RRClass_toText(s_RRClass* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRClass_str(PyObject* self);
-static PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
-static PyObject* RRClass_getCode(s_RRClass* self);
-static PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
+PyObject* RRClass_str(PyObject* self);
+PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
+PyObject* RRClass_getCode(s_RRClass* self);
+PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
// Static function for direct class creation
-static PyObject* RRClass_IN(s_RRClass *self);
-static PyObject* RRClass_CH(s_RRClass *self);
-static PyObject* RRClass_HS(s_RRClass *self);
-static PyObject* RRClass_NONE(s_RRClass *self);
-static PyObject* RRClass_ANY(s_RRClass *self);
+PyObject* RRClass_IN(s_RRClass *self);
+PyObject* RRClass_CH(s_RRClass *self);
+PyObject* RRClass_HS(s_RRClass *self);
+PyObject* RRClass_NONE(s_RRClass *self);
+PyObject* RRClass_ANY(s_RRClass *self);
+typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -74,7 +68,7 @@ static PyObject* RRClass_ANY(s_RRClass *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRClass_methods[] = {
+PyMethodDef RRClass_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRClass_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRClass_toWire), METH_VARARGS,
@@ -94,63 +88,7 @@ static PyMethodDef RRClass_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRClass
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrclass_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRClass",
- sizeof(s_RRClass), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRClass_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRClass_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRClass class encapsulates DNS resource record classes.\n"
- "This class manages the 16-bit integer class codes in quite a straightforward"
- "way. The only non trivial task is to handle textual representations of"
- "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRClass_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRClass_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRClass_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRClass_init(s_RRClass* self, PyObject* args) {
const char* s;
long i;
@@ -164,7 +102,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrclass = new RRClass(s);
+ self->cppobj = new RRClass(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
if (i < 0 || i > 0xffff) {
@@ -173,7 +111,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
"RR class number out of range");
return (-1);
}
- self->rrclass = new RRClass(i);
+ self->cppobj = new RRClass(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
uint8_t data[2];
@@ -182,7 +120,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (result);
}
InputBuffer ib(data, 2);
- self->rrclass = new RRClass(ib);
+ self->cppobj = new RRClass(ib);
PyErr_Clear();
return (0);
}
@@ -199,20 +137,20 @@ RRClass_init(s_RRClass* self, PyObject* args) {
return (-1);
}
-static void
+void
RRClass_destroy(s_RRClass* self) {
- delete self->rrclass;
- self->rrclass = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRClass_toText(s_RRClass* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrclass->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRClass_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -220,16 +158,16 @@ RRClass_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRClass_toWire(s_RRClass* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(2);
- self->rrclass->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -237,7 +175,7 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrclass->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -248,12 +186,12 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRClass_getCode(s_RRClass* self) {
- return (Py_BuildValue("I", self->rrclass->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
bool c;
@@ -265,24 +203,24 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrclass < *other->rrclass;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrclass < *other->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrclass == *other->rrclass;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrclass != *other->rrclass;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrclass < *self->rrclass;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrclass < *self->rrclass ||
- *self->rrclass == *other->rrclass;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -298,56 +236,131 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
//
// Common function for RRClass_IN/CH/etc.
//
-static PyObject* RRClass_createStatic(RRClass stc) {
+PyObject* RRClass_createStatic(RRClass stc) {
s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
if (ret != NULL) {
- ret->rrclass = new RRClass(stc);
+ ret->cppobj = new RRClass(stc);
}
return (ret);
}
-static PyObject* RRClass_IN(s_RRClass*) {
+PyObject* RRClass_IN(s_RRClass*) {
return (RRClass_createStatic(RRClass::IN()));
}
-static PyObject* RRClass_CH(s_RRClass*) {
+PyObject* RRClass_CH(s_RRClass*) {
return (RRClass_createStatic(RRClass::CH()));
}
-static PyObject* RRClass_HS(s_RRClass*) {
+PyObject* RRClass_HS(s_RRClass*) {
return (RRClass_createStatic(RRClass::HS()));
}
-static PyObject* RRClass_NONE(s_RRClass*) {
+PyObject* RRClass_NONE(s_RRClass*) {
return (RRClass_createStatic(RRClass::NONE()));
}
-static PyObject* RRClass_ANY(s_RRClass*) {
+PyObject* RRClass_ANY(s_RRClass*) {
return (RRClass_createStatic(RRClass::ANY()));
}
-// end of RRClass
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRClass;
+PyObject* po_IncompleteRRClass;
+
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRClass
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrclass_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRClass",
+ sizeof(s_RRClass), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRClass_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRClass_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRClass class encapsulates DNS resource record classes.\n"
+ "This class manages the 16-bit integer class codes in quite a straightforward"
+ "way. The only non trivial task is to handle textual representations of"
+ "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRClass_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRClass_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRClass_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRClassObject(const RRClass& source) {
+ RRClassContainer container(PyObject_New(s_RRClass, &rrclass_type));
+ container.set(new RRClass(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRClass(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass", NULL, NULL);
- Py_INCREF(po_InvalidRRClass);
- PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
- po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass", NULL, NULL);
- Py_INCREF(po_IncompleteRRClass);
- PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrclass_type) < 0) {
- return (false);
+PyRRClass_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&rrclass_type);
- PyModule_AddObject(mod, "RRClass",
- reinterpret_cast<PyObject*>(&rrclass_type));
-
- return (true);
+ return (PyObject_TypeCheck(obj, &rrclass_type));
}
+
+const RRClass&
+PyRRClass_ToRRClass(const PyObject* rrclass_obj) {
+ if (rrclass_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRClass PyObject conversion");
+ }
+ const s_RRClass* rrclass = static_cast<const s_RRClass*>(rrclass_obj);
+ return (*rrclass->cppobj);
+}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrclass_python.h b/src/lib/dns/python/rrclass_python.h
new file mode 100644
index 0000000..f58bba6
--- /dev/null
+++ b/src/lib/dns/python/rrclass_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRCLASS_H
+#define __PYTHON_RRCLASS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+
+namespace python {
+
+extern PyObject* po_InvalidRRClass;
+extern PyObject* po_IncompleteRRClass;
+
+extern PyTypeObject rrclass_type;
+
+/// This is a simple shortcut to create a python RRClass object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRClassObject(const RRClass& source);
+
+/// \brief Checks if the given python object is an RRClass object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRClass, false otherwise
+bool PyRRClass_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRClass object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRClass; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRClass_Check()
+///
+/// \note This is not a copy; if the RRClass is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrclass_obj The rrclass object to convert
+const RRClass& PyRRClass_ToRRClass(const PyObject* rrclass_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRCLASS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index 71a0710..9fc3d79 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -12,55 +12,63 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/rrset.h>
+#include <Python.h>
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_EmptyRRset;
+#include <util/python/pycppwrapper_util.h>
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include <dns/rrset.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "pydnspp_common.h"
+#include "rrset_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
-// RRset
+// The s_* Class simply covers one instantiation of the object
// Using a shared_ptr here should not really be necessary (PyObject
// is already reference-counted), however internally on the cpp side,
// not doing so might result in problems, since we can't copy construct
-// rrsets, adding them to messages results in a problem when the
-// message is destroyed or cleared later
+// rdata field, adding them to rrsets results in a problem when the
+// rrset is destroyed later
class s_RRset : public PyObject {
public:
- RRsetPtr rrset;
+ isc::dns::RRsetPtr cppobj;
};
-static int RRset_init(s_RRset* self, PyObject* args);
-static void RRset_destroy(s_RRset* self);
-
-static PyObject* RRset_getRdataCount(s_RRset* self);
-static PyObject* RRset_getName(s_RRset* self);
-static PyObject* RRset_getClass(s_RRset* self);
-static PyObject* RRset_getType(s_RRset* self);
-static PyObject* RRset_getTTL(s_RRset* self);
-static PyObject* RRset_setName(s_RRset* self, PyObject* args);
-static PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-static PyObject* RRset_toText(s_RRset* self);
-static PyObject* RRset_str(PyObject* self);
-static PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-static PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-static PyObject* RRset_getRdata(s_RRset* self);
+int RRset_init(s_RRset* self, PyObject* args);
+void RRset_destroy(s_RRset* self);
+
+PyObject* RRset_getRdataCount(s_RRset* self);
+PyObject* RRset_getName(s_RRset* self);
+PyObject* RRset_getClass(s_RRset* self);
+PyObject* RRset_getType(s_RRset* self);
+PyObject* RRset_getTTL(s_RRset* self);
+PyObject* RRset_setName(s_RRset* self, PyObject* args);
+PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
+PyObject* RRset_toText(s_RRset* self);
+PyObject* RRset_str(PyObject* self);
+PyObject* RRset_toWire(s_RRset* self, PyObject* args);
+PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
+PyObject* RRset_getRdata(s_RRset* self);
+PyObject* RRset_removeRRsig(s_RRset* self);
+
// TODO: iterator?
-static PyMethodDef RRset_methods[] = {
+PyMethodDef RRset_methods[] = {
{ "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
"Returns the number of rdata fields." },
{ "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
@@ -88,208 +96,142 @@ static PyMethodDef RRset_methods[] = {
"Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
{ "get_rdata", reinterpret_cast<PyCFunction>(RRset_getRdata), METH_NOARGS,
"Returns a List containing all Rdata elements" },
+ { "remove_rrsig", reinterpret_cast<PyCFunction>(RRset_removeRRsig), METH_NOARGS,
+ "Clears the list of RRsigs for this RRset" },
{ NULL, NULL, 0, NULL }
};
-static PyTypeObject rrset_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRset",
- sizeof(s_RRset), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRset_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRset_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The AbstractRRset class is an abstract base class that "
- "models a DNS RRset.\n\n"
- "An object of (a specific derived class of) AbstractRRset "
- "models an RRset as described in the DNS standard:\n"
- "A set of DNS resource records (RRs) of the same type and class. "
- "The standard requires the TTL of all RRs in an RRset be the same; "
- "this class follows that requirement.\n\n"
- "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
- "RRset contains two identical RRs and that name servers should suppress "
- "such duplicates.\n"
- "This class is not responsible for ensuring this requirement: For example, "
- "addRdata() method doesn't check if there's already RDATA identical "
- "to the one being added.\n"
- "This is because such checks can be expensive, and it's often easy to "
- "ensure the uniqueness requirement at the %data preparation phase "
- "(e.g. when loading a zone).",
- NULL, // tp_traverse
- NULL, // tp_clear
- NULL, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRset_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRset_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRset_init(s_RRset* self, PyObject* args) {
- s_Name* name;
- s_RRClass* rrclass;
- s_RRType* rrtype;
- s_RRTTL* rrttl;
+ PyObject* name;
+ PyObject* rrclass;
+ PyObject* rrtype;
+ PyObject* rrttl;
if (PyArg_ParseTuple(args, "O!O!O!O!", &name_type, &name,
&rrclass_type, &rrclass,
&rrtype_type, &rrtype,
&rrttl_type, &rrttl
)) {
- self->rrset = RRsetPtr(new RRset(*name->cppobj, *rrclass->rrclass,
- *rrtype->rrtype, *rrttl->rrttl));
+ self->cppobj = RRsetPtr(new RRset(PyName_ToName(name),
+ PyRRClass_ToRRClass(rrclass),
+ PyRRType_ToRRType(rrtype),
+ PyRRTTL_ToRRTTL(rrttl)));
return (0);
}
- self->rrset = RRsetPtr();
+ self->cppobj = RRsetPtr();
return (-1);
}
-static void
+void
RRset_destroy(s_RRset* self) {
// Clear the shared_ptr so that its reference count is zero
// before we call tp_free() (there is no direct release())
- self->rrset.reset();
+ self->cppobj.reset();
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRset_getRdataCount(s_RRset* self) {
- return (Py_BuildValue("I", self->rrset->getRdataCount()));
+ return (Py_BuildValue("I", self->cppobj->getRdataCount()));
}
-static PyObject*
+PyObject*
RRset_getName(s_RRset* self) {
- s_Name* name;
-
- // is this the best way to do this?
- name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
- if (name != NULL) {
- name->cppobj = new Name(self->rrset->getName());
- if (name->cppobj == NULL)
- {
- Py_DECREF(name);
- return (NULL);
- }
+ try {
+ return (createNameObject(self->cppobj->getName()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Name: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Name");
}
-
- return (name);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getClass(s_RRset* self) {
- s_RRClass* rrclass;
-
- rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
- if (rrclass != NULL) {
- rrclass->rrclass = new RRClass(self->rrset->getClass());
- if (rrclass->rrclass == NULL)
- {
- Py_DECREF(rrclass);
- return (NULL);
- }
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRClass: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRClass");
}
-
- return (rrclass);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getType(s_RRset* self) {
- s_RRType* rrtype;
-
- rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
- if (rrtype != NULL) {
- rrtype->rrtype = new RRType(self->rrset->getType());
- if (rrtype->rrtype == NULL)
- {
- Py_DECREF(rrtype);
- return (NULL);
- }
+ try {
+ return (createRRTypeObject(self->cppobj->getType()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question RRType: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question RRType");
}
-
- return (rrtype);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_getTTL(s_RRset* self) {
- s_RRTTL* rrttl;
-
- rrttl = static_cast<s_RRTTL*>(rrttl_type.tp_alloc(&rrttl_type, 0));
- if (rrttl != NULL) {
- rrttl->rrttl = new RRTTL(self->rrset->getTTL());
- if (rrttl->rrttl == NULL)
- {
- Py_DECREF(rrttl);
- return (NULL);
- }
+ try {
+ return (createRRTTLObject(self->cppobj->getTTL()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting question TTL: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting question TTL");
}
-
- return (rrttl);
+ return (NULL);
}
-static PyObject*
+PyObject*
RRset_setName(s_RRset* self, PyObject* args) {
- s_Name* name;
+ PyObject* name;
if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
return (NULL);
}
- self->rrset->setName(*name->cppobj);
+ self->cppobj->setName(PyName_ToName(name));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_setTTL(s_RRset* self, PyObject* args) {
- s_RRTTL* rrttl;
+ PyObject* rrttl;
if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
return (NULL);
}
- self->rrset->setTTL(*rrttl->rrttl);
+ self->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
Py_RETURN_NONE;
}
-static PyObject*
+PyObject*
RRset_toText(s_RRset* self) {
try {
- return (Py_BuildValue("s", self->rrset->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
} catch (const EmptyRRset& ers) {
PyErr_SetString(po_EmptyRRset, ers.what());
return (NULL);
}
}
-static PyObject*
+PyObject*
RRset_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -297,17 +239,17 @@ RRset_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRset_toWire(s_RRset* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
try {
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4096);
- self->rrset->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -315,7 +257,7 @@ RRset_toWire(s_RRset* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrset->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -331,14 +273,14 @@ RRset_toWire(s_RRset* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRset_addRdata(s_RRset* self, PyObject* args) {
- s_Rdata* rdata;
+ PyObject* rdata;
if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
return (NULL);
}
try {
- self->rrset->addRdata(*rdata->rdata);
+ self->cppobj->addRdata(PyRdata_ToRdata(rdata));
Py_RETURN_NONE;
} catch (const std::bad_cast&) {
PyErr_Clear();
@@ -348,55 +290,173 @@ RRset_addRdata(s_RRset* self, PyObject* args) {
}
}
-static PyObject*
+PyObject*
RRset_getRdata(s_RRset* self) {
PyObject* list = PyList_New(0);
- RdataIteratorPtr it = self->rrset->getRdataIterator();
-
- for (; !it->isLast(); it->next()) {
- s_Rdata *rds = static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
- if (rds != NULL) {
- // hmz them iterators/shared_ptrs and private constructors
- // make this a bit weird, so we create a new one with
- // the data available
- const Rdata *rd = &it->getCurrent();
- rds->rdata = createRdata(self->rrset->getType(), self->rrset->getClass(), *rd);
- PyList_Append(list, rds);
- } else {
- return (NULL);
+ RdataIteratorPtr it = self->cppobj->getRdataIterator();
+
+ try {
+ for (; !it->isLast(); it->next()) {
+ const rdata::Rdata *rd = &it->getCurrent();
+ if (PyList_Append(list,
+ createRdataObject(createRdata(self->cppobj->getType(),
+ self->cppobj->getClass(), *rd))) == -1) {
+ Py_DECREF(list);
+ return (NULL);
+ }
}
+ return (list);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Unexpected failure getting rrset Rdata: " +
+ string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure getting rrset Rdata");
}
-
- return (list);
+ Py_DECREF(list);
+ return (NULL);
}
-// end of RRset
+PyObject*
+RRset_removeRRsig(s_RRset* self) {
+ self->cppobj->removeRRsig();
+ Py_RETURN_NONE;
+}
+} // end of unnamed namespace
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_RRset(PyObject* mod) {
- // Add the exceptions to the module
- po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
- PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+namespace isc {
+namespace dns {
+namespace python {
- // Add the enums to the module
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_EmptyRRset;
- // Add the constants to the module
+PyTypeObject rrset_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRset",
+ sizeof(s_RRset), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRset_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRset_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The AbstractRRset class is an abstract base class that "
+ "models a DNS RRset.\n\n"
+ "An object of (a specific derived class of) AbstractRRset "
+ "models an RRset as described in the DNS standard:\n"
+ "A set of DNS resource records (RRs) of the same type and class. "
+ "The standard requires the TTL of all RRs in an RRset be the same; "
+ "this class follows that requirement.\n\n"
+ "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
+ "RRset contains two identical RRs and that name servers should suppress "
+ "such duplicates.\n"
+ "This class is not responsible for ensuring this requirement: For example, "
+ "addRdata() method doesn't check if there's already RDATA identical "
+ "to the one being added.\n"
+ "This is because such checks can be expensive, and it's often easy to "
+ "ensure the uniqueness requirement at the %data preparation phase "
+ "(e.g. when loading a zone).",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRset_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRset_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRsetObject(const RRset& source) {
- // Add the classes to the module
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module
+ // RRsets are noncopyable, so as a workaround we recreate a new one
+ // and copy over all content
+ RRsetPtr new_rrset = isc::dns::RRsetPtr(
+ new isc::dns::RRset(source.getName(), source.getClass(),
+ source.getType(), source.getTTL()));
- // NameComparisonResult
- if (PyType_Ready(&rrset_type) < 0) {
- return (false);
+ isc::dns::RdataIteratorPtr rdata_it(source.getRdataIterator());
+ for (rdata_it->first(); !rdata_it->isLast(); rdata_it->next()) {
+ new_rrset->addRdata(rdata_it->getCurrent());
+ }
+
+ isc::dns::RRsetPtr sigs = source.getRRsig();
+ if (sigs) {
+ new_rrset->addRRsig(sigs);
+ }
+ s_RRset* py_rrset =
+ static_cast<s_RRset*>(rrset_type.tp_alloc(&rrset_type, 0));
+ if (py_rrset == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+ "probably due to short memory");
}
- Py_INCREF(&rrset_type);
- PyModule_AddObject(mod, "RRset",
- reinterpret_cast<PyObject*>(&rrset_type));
-
- return (true);
+ py_rrset->cppobj = new_rrset;
+ return (py_rrset);
}
+bool
+PyRRset_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrset_type));
+}
+
+RRset&
+PyRRset_ToRRset(PyObject* rrset_obj) {
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (*rrset->cppobj);
+}
+
+RRsetPtr
+PyRRset_ToRRsetPtr(PyObject* rrset_obj) {
+ if (rrset_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRset PyObject conversion");
+ }
+ s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+ return (rrset->cppobj);
+}
+
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rrset_python.h b/src/lib/dns/python/rrset_python.h
new file mode 100644
index 0000000..4268678
--- /dev/null
+++ b/src/lib/dns/python/rrset_python.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRSET_H
+#define __PYTHON_RRSET_H 1
+
+#include <Python.h>
+
+#include <dns/rrset.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_EmptyRRset;
+
+extern PyTypeObject rrset_type;
+
+/// This is a simple shortcut to create a python RRset object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRsetObject(const RRset& source);
+
+/// \brief Checks if the given python object is an RRset object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRset, false otherwise
+bool PyRRset_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRset object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \note This is not a copy; if the RRset is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrset_obj The rrset object to convert
+RRset& PyRRset_ToRRset(PyObject* rrset_obj);
+
+/// \brief Returns the shared_ptr of the RRset object contained within the
+/// given Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \param rrset_obj The rrset object to convert
+RRsetPtr PyRRset_ToRRsetPtr(PyObject* rrset_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRSET_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrttl_python.cc b/src/lib/dns/python/rrttl_python.cc
index c4b25bf..3a3f067 100644
--- a/src/lib/dns/python/rrttl_python.cc
+++ b/src/lib/dns/python/rrttl_python.cc
@@ -12,57 +12,41 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrttl.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrttl_python.h"
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRTTL;
-static PyObject* po_IncompleteRRTTL;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRTTL
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRTTL : public PyObject {
public:
- RRTTL* rrttl;
+ s_RRTTL() : cppobj(NULL) {};
+ isc::dns::RRTTL* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
-static int RRTTL_init(s_RRTTL* self, PyObject* args);
-static void RRTTL_destroy(s_RRTTL* self);
+typedef CPPPyObjectContainer<s_RRTTL, RRTTL> RRTTLContainer;
-// These are the functions we export
-static PyObject* RRTTL_toText(s_RRTTL* self);
+PyObject* RRTTL_toText(s_RRTTL* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRTTL_str(PyObject* self);
-static PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
-static PyObject* RRTTL_getValue(s_RRTTL* self);
-static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
+PyObject* RRTTL_str(PyObject* self);
+PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
+PyObject* RRTTL_getValue(s_RRTTL* self);
+PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -70,7 +54,7 @@ static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRTTL_methods[] = {
+PyMethodDef RRTTL_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRTTL_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRTTL_toWire), METH_VARARGS,
@@ -85,65 +69,7 @@ static PyMethodDef RRTTL_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRTTL
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrttl_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRTTL",
- sizeof(s_RRTTL), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRTTL_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRTTL_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
- "This is a straightforward class; an RRTTL object simply maintains a "
- "32-bit unsigned integer corresponding to the TTL value. The main purpose "
- "of this class is to provide convenient interfaces to convert a textual "
- "representation into the integer TTL value and vice versa, and to handle "
- "wire-format representations.",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRTTL_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRTTL_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRTTL_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRTTL_init(s_RRTTL* self, PyObject* args) {
const char* s;
long long i;
@@ -157,7 +83,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrttl = new RRTTL(s);
+ self->cppobj = new RRTTL(s);
return (0);
} else if (PyArg_ParseTuple(args, "L", &i)) {
PyErr_Clear();
@@ -165,7 +91,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR TTL number out of range");
return (-1);
}
- self->rrttl = new RRTTL(i);
+ self->cppobj = new RRTTL(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) &&
PySequence_Check(bytes)) {
@@ -176,7 +102,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrttl = new RRTTL(ib);
+ self->cppobj = new RRTTL(ib);
PyErr_Clear();
return (0);
}
@@ -200,20 +126,20 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
return (-1);
}
-static void
+void
RRTTL_destroy(s_RRTTL* self) {
- delete self->rrttl;
- self->rrttl = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRTTL_toText(s_RRTTL* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrttl->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRTTL_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self,
@@ -221,16 +147,16 @@ RRTTL_str(PyObject* self) {
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRTTL_toWire(s_RRTTL* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
-
+ PyObject* mr;
+
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
-
+
OutputBuffer buffer(4);
- self->rrttl->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -239,7 +165,7 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrttl->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -250,12 +176,12 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRTTL_getValue(s_RRTTL* self) {
- return (Py_BuildValue("I", self->rrttl->getValue()));
+ return (Py_BuildValue("I", self->cppobj->getValue()));
}
-static PyObject*
+PyObject*
RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
bool c = false;
@@ -267,24 +193,24 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrttl < *other->rrttl;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrttl < *other->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrttl == *other->rrttl;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrttl != *other->rrttl;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrttl < *self->rrttl;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrttl < *self->rrttl ||
- *self->rrttl == *other->rrttl;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
}
if (c)
@@ -292,27 +218,104 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
else
Py_RETURN_FALSE;
}
-// end of RRTTL
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRTTL;
+PyObject* po_IncompleteRRTTL;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRTTL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrttl_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRTTL",
+ sizeof(s_RRTTL), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRTTL_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRTTL_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
+ "This is a straightforward class; an RRTTL object simply maintains a "
+ "32-bit unsigned integer corresponding to the TTL value. The main purpose "
+ "of this class is to provide convenient interfaces to convert a textual "
+ "representation into the integer TTL value and vice versa, and to handle "
+ "wire-format representations.",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRTTL_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRTTL_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRTTL_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createRRTTLObject(const RRTTL& source) {
+ RRTTLContainer container(PyObject_New(s_RRTTL, &rrttl_type));
+ container.set(new RRTTL(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRTTL(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
- po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+PyRRTTL_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrttl_type));
+}
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrttl_type) < 0) {
- return (false);
+const RRTTL&
+PyRRTTL_ToRRTTL(const PyObject* rrttl_obj) {
+ if (rrttl_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRTTL PyObject conversion");
}
- Py_INCREF(&rrttl_type);
- PyModule_AddObject(mod, "RRTTL",
- reinterpret_cast<PyObject*>(&rrttl_type));
-
- return (true);
+ const s_RRTTL* rrttl = static_cast<const s_RRTTL*>(rrttl_obj);
+ return (*rrttl->cppobj);
}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/rrttl_python.h b/src/lib/dns/python/rrttl_python.h
new file mode 100644
index 0000000..9dbc982
--- /dev/null
+++ b/src/lib/dns/python/rrttl_python.h
@@ -0,0 +1,67 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTTL_H
+#define __PYTHON_RRTTL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRTTL;
+
+namespace python {
+
+extern PyObject* po_InvalidRRTTL;
+extern PyObject* po_IncompleteRRTTL;
+
+extern PyTypeObject rrttl_type;
+
+/// This is a simple shortcut to create a python RRTTL object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTTLObject(const RRTTL& source);
+
+/// \brief Checks if the given python object is a RRTTL object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRTTL, false otherwise
+bool PyRRTTL_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRTTL object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRTTL; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRTTL_Check()
+///
+/// \note This is not a copy; if the RRTTL is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrttl_obj The rrttl object to convert
+const RRTTL& PyRRTTL_ToRRTTL(const PyObject* rrttl_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTTL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 00e0acd..bf20b7c 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -12,77 +12,64 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
#include <vector>
#include <dns/rrtype.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
using namespace std;
using namespace isc::dns;
+using namespace isc::dns::python;
using namespace isc::util;
+using namespace isc::util::python;
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRType;
-static PyObject* po_IncompleteRRType;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRType
-//
-
+namespace {
// The s_* Class simply covers one instantiation of the object
class s_RRType : public PyObject {
public:
- const RRType* rrtype;
+ const RRType* cppobj;
};
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
// General creation and destruction
-static int RRType_init(s_RRType* self, PyObject* args);
-static void RRType_destroy(s_RRType* self);
+int RRType_init(s_RRType* self, PyObject* args);
+void RRType_destroy(s_RRType* self);
// These are the functions we export
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self);
// This is a second version of toText, we need one where the argument
// is a PyObject*, for the str() function in python.
-static PyObject* RRType_str(PyObject* self);
-static PyObject* RRType_toWire(s_RRType* self, PyObject* args);
-static PyObject* RRType_getCode(s_RRType* self);
-static PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
-static PyObject* RRType_NSEC3PARAM(s_RRType *self);
-static PyObject* RRType_DNAME(s_RRType *self);
-static PyObject* RRType_PTR(s_RRType *self);
-static PyObject* RRType_MX(s_RRType *self);
-static PyObject* RRType_DNSKEY(s_RRType *self);
-static PyObject* RRType_TXT(s_RRType *self);
-static PyObject* RRType_RRSIG(s_RRType *self);
-static PyObject* RRType_NSEC(s_RRType *self);
-static PyObject* RRType_AAAA(s_RRType *self);
-static PyObject* RRType_DS(s_RRType *self);
-static PyObject* RRType_OPT(s_RRType *self);
-static PyObject* RRType_A(s_RRType *self);
-static PyObject* RRType_NS(s_RRType *self);
-static PyObject* RRType_CNAME(s_RRType *self);
-static PyObject* RRType_SOA(s_RRType *self);
-static PyObject* RRType_NSEC3(s_RRType *self);
-static PyObject* RRType_IXFR(s_RRType *self);
-static PyObject* RRType_AXFR(s_RRType *self);
-static PyObject* RRType_ANY(s_RRType *self);
+PyObject* RRType_str(PyObject* self);
+PyObject* RRType_toWire(s_RRType* self, PyObject* args);
+PyObject* RRType_getCode(s_RRType* self);
+PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
+PyObject* RRType_NSEC3PARAM(s_RRType *self);
+PyObject* RRType_DNAME(s_RRType *self);
+PyObject* RRType_PTR(s_RRType *self);
+PyObject* RRType_MX(s_RRType *self);
+PyObject* RRType_DNSKEY(s_RRType *self);
+PyObject* RRType_TXT(s_RRType *self);
+PyObject* RRType_RRSIG(s_RRType *self);
+PyObject* RRType_NSEC(s_RRType *self);
+PyObject* RRType_AAAA(s_RRType *self);
+PyObject* RRType_DS(s_RRType *self);
+PyObject* RRType_OPT(s_RRType *self);
+PyObject* RRType_A(s_RRType *self);
+PyObject* RRType_NS(s_RRType *self);
+PyObject* RRType_CNAME(s_RRType *self);
+PyObject* RRType_SOA(s_RRType *self);
+PyObject* RRType_NSEC3(s_RRType *self);
+PyObject* RRType_IXFR(s_RRType *self);
+PyObject* RRType_AXFR(s_RRType *self);
+PyObject* RRType_ANY(s_RRType *self);
+
+typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -90,7 +77,7 @@ static PyObject* RRType_ANY(s_RRType *self);
// 2. Our static function here
// 3. Argument type
// 4. Documentation
-static PyMethodDef RRType_methods[] = {
+PyMethodDef RRType_methods[] = {
{ "to_text", reinterpret_cast<PyCFunction>(RRType_toText), METH_NOARGS,
"Returns the string representation" },
{ "to_wire", reinterpret_cast<PyCFunction>(RRType_toWire), METH_VARARGS,
@@ -124,63 +111,7 @@ static PyMethodDef RRType_methods[] = {
{ NULL, NULL, 0, NULL }
};
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRType
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrtype_type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp.RRType",
- sizeof(s_RRType), // tp_basicsize
- 0, // tp_itemsize
- (destructor)RRType_destroy, // tp_dealloc
- NULL, // tp_print
- NULL, // tp_getattr
- NULL, // tp_setattr
- NULL, // tp_reserved
- NULL, // tp_repr
- NULL, // tp_as_number
- NULL, // tp_as_sequence
- NULL, // tp_as_mapping
- NULL, // tp_hash
- NULL, // tp_call
- RRType_str, // tp_str
- NULL, // tp_getattro
- NULL, // tp_setattro
- NULL, // tp_as_buffer
- Py_TPFLAGS_DEFAULT, // tp_flags
- "The RRType class encapsulates DNS resource record types.\n\n"
- "This class manages the 16-bit integer type codes in quite a straightforward "
- "way. The only non trivial task is to handle textual representations of "
- "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
- NULL, // tp_traverse
- NULL, // tp_clear
- (richcmpfunc)RRType_richcmp, // tp_richcompare
- 0, // tp_weaklistoffset
- NULL, // tp_iter
- NULL, // tp_iternext
- RRType_methods, // tp_methods
- NULL, // tp_members
- NULL, // tp_getset
- NULL, // tp_base
- NULL, // tp_dict
- NULL, // tp_descr_get
- NULL, // tp_descr_set
- 0, // tp_dictoffset
- (initproc)RRType_init, // tp_init
- NULL, // tp_alloc
- PyType_GenericNew, // tp_new
- NULL, // tp_free
- NULL, // tp_is_gc
- NULL, // tp_bases
- NULL, // tp_mro
- NULL, // tp_cache
- NULL, // tp_subclasses
- NULL, // tp_weaklist
- NULL, // tp_del
- 0 // tp_version_tag
-};
-
-static int
+int
RRType_init(s_RRType* self, PyObject* args) {
const char* s;
long i;
@@ -194,7 +125,7 @@ RRType_init(s_RRType* self, PyObject* args) {
// (the way to do exceptions is to set PyErr and return -1)
try {
if (PyArg_ParseTuple(args, "s", &s)) {
- self->rrtype = new RRType(s);
+ self->cppobj = new RRType(s);
return (0);
} else if (PyArg_ParseTuple(args, "l", &i)) {
PyErr_Clear();
@@ -202,7 +133,7 @@ RRType_init(s_RRType* self, PyObject* args) {
PyErr_SetString(PyExc_ValueError, "RR Type number out of range");
return (-1);
}
- self->rrtype = new RRType(i);
+ self->cppobj = new RRType(i);
return (0);
} else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
Py_ssize_t size = PySequence_Size(bytes);
@@ -212,7 +143,7 @@ RRType_init(s_RRType* self, PyObject* args) {
return (result);
}
InputBuffer ib(&data[0], size);
- self->rrtype = new RRType(ib);
+ self->cppobj = new RRType(ib);
PyErr_Clear();
return (0);
}
@@ -236,36 +167,36 @@ RRType_init(s_RRType* self, PyObject* args) {
return (-1);
}
-static void
+void
RRType_destroy(s_RRType* self) {
- delete self->rrtype;
- self->rrtype = NULL;
+ delete self->cppobj;
+ self->cppobj = NULL;
Py_TYPE(self)->tp_free(self);
}
-static PyObject*
+PyObject*
RRType_toText(s_RRType* self) {
// Py_BuildValue makes python objects from native data
- return (Py_BuildValue("s", self->rrtype->toText().c_str()));
+ return (Py_BuildValue("s", self->cppobj->toText().c_str()));
}
-static PyObject*
+PyObject*
RRType_str(PyObject* self) {
// Simply call the to_text method we already defined
return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
const_cast<char*>("")));
}
-static PyObject*
+PyObject*
RRType_toWire(s_RRType* self, PyObject* args) {
PyObject* bytes;
- s_MessageRenderer* mr;
+ PyObject* mr;
if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
PyObject* bytes_o = bytes;
OutputBuffer buffer(2);
- self->rrtype->toWire(buffer);
+ self->cppobj->toWire(buffer);
PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
// We need to release the object we temporarily created here
@@ -273,7 +204,7 @@ RRType_toWire(s_RRType* self, PyObject* args) {
Py_DECREF(n);
return (result);
} else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
- self->rrtype->toWire(*mr->messagerenderer);
+ self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
// If we return NULL it is seen as an error, so use this for
// None returns
Py_RETURN_NONE;
@@ -284,12 +215,12 @@ RRType_toWire(s_RRType* self, PyObject* args) {
return (NULL);
}
-static PyObject*
+PyObject*
RRType_getCode(s_RRType* self) {
- return (Py_BuildValue("I", self->rrtype->getCode()));
+ return (Py_BuildValue("I", self->cppobj->getCode()));
}
-static PyObject*
+PyObject*
RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
bool c;
@@ -301,24 +232,24 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
switch (op) {
case Py_LT:
- c = *self->rrtype < *other->rrtype;
+ c = *self->cppobj < *other->cppobj;
break;
case Py_LE:
- c = *self->rrtype < *other->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *self->cppobj < *other->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
case Py_EQ:
- c = *self->rrtype == *other->rrtype;
+ c = *self->cppobj == *other->cppobj;
break;
case Py_NE:
- c = *self->rrtype != *other->rrtype;
+ c = *self->cppobj != *other->cppobj;
break;
case Py_GT:
- c = *other->rrtype < *self->rrtype;
+ c = *other->cppobj < *self->cppobj;
break;
case Py_GE:
- c = *other->rrtype < *self->rrtype ||
- *self->rrtype == *other->rrtype;
+ c = *other->cppobj < *self->cppobj ||
+ *self->cppobj == *other->cppobj;
break;
default:
PyErr_SetString(PyExc_IndexError,
@@ -334,131 +265,200 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
//
// Common function for RRType_A/NS/etc.
//
-static PyObject* RRType_createStatic(RRType stc) {
+PyObject* RRType_createStatic(RRType stc) {
s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
if (ret != NULL) {
- ret->rrtype = new RRType(stc);
+ ret->cppobj = new RRType(stc);
}
return (ret);
}
-static PyObject*
+PyObject*
RRType_NSEC3PARAM(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3PARAM()));
}
-static PyObject*
+PyObject*
RRType_DNAME(s_RRType*) {
return (RRType_createStatic(RRType::DNAME()));
}
-static PyObject*
+PyObject*
RRType_PTR(s_RRType*) {
return (RRType_createStatic(RRType::PTR()));
}
-static PyObject*
+PyObject*
RRType_MX(s_RRType*) {
return (RRType_createStatic(RRType::MX()));
}
-static PyObject*
+PyObject*
RRType_DNSKEY(s_RRType*) {
return (RRType_createStatic(RRType::DNSKEY()));
}
-static PyObject*
+PyObject*
RRType_TXT(s_RRType*) {
return (RRType_createStatic(RRType::TXT()));
}
-static PyObject*
+PyObject*
RRType_RRSIG(s_RRType*) {
return (RRType_createStatic(RRType::RRSIG()));
}
-static PyObject*
+PyObject*
RRType_NSEC(s_RRType*) {
return (RRType_createStatic(RRType::NSEC()));
}
-static PyObject*
+PyObject*
RRType_AAAA(s_RRType*) {
return (RRType_createStatic(RRType::AAAA()));
}
-static PyObject*
+PyObject*
RRType_DS(s_RRType*) {
return (RRType_createStatic(RRType::DS()));
}
-static PyObject*
+PyObject*
RRType_OPT(s_RRType*) {
return (RRType_createStatic(RRType::OPT()));
}
-static PyObject*
+PyObject*
RRType_A(s_RRType*) {
return (RRType_createStatic(RRType::A()));
}
-static PyObject*
+PyObject*
RRType_NS(s_RRType*) {
return (RRType_createStatic(RRType::NS()));
}
-static PyObject*
+PyObject*
RRType_CNAME(s_RRType*) {
return (RRType_createStatic(RRType::CNAME()));
}
-static PyObject*
+PyObject*
RRType_SOA(s_RRType*) {
return (RRType_createStatic(RRType::SOA()));
}
-static PyObject*
+PyObject*
RRType_NSEC3(s_RRType*) {
return (RRType_createStatic(RRType::NSEC3()));
}
-static PyObject*
+PyObject*
RRType_IXFR(s_RRType*) {
return (RRType_createStatic(RRType::IXFR()));
}
-static PyObject*
+PyObject*
RRType_AXFR(s_RRType*) {
return (RRType_createStatic(RRType::AXFR()));
}
-static PyObject*
+PyObject*
RRType_ANY(s_RRType*) {
return (RRType_createStatic(RRType::ANY()));
}
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
-// end of RRType
+PyObject* po_InvalidRRType;
+PyObject* po_IncompleteRRType;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRType
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrtype_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pydnspp.RRType",
+ sizeof(s_RRType), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)RRType_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RRType_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ "The RRType class encapsulates DNS resource record types.\n\n"
+ "This class manages the 16-bit integer type codes in quite a straightforward "
+ "way. The only non trivial task is to handle textual representations of "
+ "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ (richcmpfunc)RRType_richcmp, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RRType_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)RRType_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+PyObject*
+createRRTypeObject(const RRType& source) {
+ RRTypeContainer container(PyObject_New(s_RRType, &rrtype_type));
+ container.set(new RRType(source));
+ return (container.release());
+}
-// Module Initialization, all statics are initialized here
bool
-initModulePart_RRType(PyObject* mod) {
- // Add the exceptions to the module
- po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
- PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
- po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType", NULL, NULL);
- PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
-
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&rrtype_type) < 0) {
- return (false);
+PyRRType_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &rrtype_type));
+}
+
+const RRType&
+PyRRType_ToRRType(const PyObject* rrtype_obj) {
+ if (rrtype_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in RRType PyObject conversion");
}
- Py_INCREF(&rrtype_type);
- PyModule_AddObject(mod, "RRType",
- reinterpret_cast<PyObject*>(&rrtype_type));
-
- return (true);
+ const s_RRType* rrtype = static_cast<const s_RRType*>(rrtype_obj);
+ return (*rrtype->cppobj);
}
+
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrtype_python.h b/src/lib/dns/python/rrtype_python.h
new file mode 100644
index 0000000..596598e
--- /dev/null
+++ b/src/lib/dns/python/rrtype_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTYPE_H
+#define __PYTHON_RRTYPE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRType;
+
+namespace python {
+
+extern PyObject* po_InvalidRRType;
+extern PyObject* po_IncompleteRRType;
+
+extern PyTypeObject rrtype_type;
+
+/// This is a simple shortcut to create a python RRType object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTypeObject(const RRType& source);
+
+/// \brief Checks if the given python object is a RRType object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRType, false otherwise
+bool PyRRType_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRType object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type RRType; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyRRType_Check()
+///
+/// \note This is not a copy; if the RRType is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrtype_obj The rrtype object to convert
+const RRType& PyRRType_ToRRType(const PyObject* rrtype_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTYPE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index 61d7df6..d1273f3 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -24,7 +24,7 @@ EXTRA_DIST += testutil.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index 41b9a67..8f2d732 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -21,6 +21,7 @@ import unittest
import os
from pydnspp import *
from testutil import *
+from pyunittests_util import fix_current_time
# helper functions for tests taken from c++ unittests
if "TESTDATA_PATH" in os.environ:
@@ -28,10 +29,10 @@ if "TESTDATA_PATH" in os.environ:
else:
testdata_path = "../tests/testdata"
-def factoryFromFile(message, file):
+def factoryFromFile(message, file, parse_options=Message.PARSE_DEFAULT):
data = read_wire_data(file)
- message.from_wire(data)
- pass
+ message.from_wire(data, parse_options)
+ return data
# we don't have direct comparison for rrsets right now (should we?
# should go in the cpp version first then), so also no direct list
@@ -44,6 +45,15 @@ def compare_rrset_list(list1, list2):
return False
return True
+# These are used for TSIG + TC tests
+LONG_TXT1 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";
+
+LONG_TXT2 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456";
+
+LONG_TXT3 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01";
+
+LONG_TXT4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0";
+
# a complete message taken from cpp tests, for testing towire and totext
def create_message():
message_render = Message(Message.RENDER)
@@ -62,16 +72,12 @@ def create_message():
message_render.add_rrset(Message.SECTION_ANSWER, rrset)
return message_render
-def strip_mutable_tsig_data(data):
- # Unfortunately we cannot easily compare TSIG RR because we can't tweak
- # current time. As a work around this helper function strips off the time
- # dependent part of TSIG RDATA, i.e., the MAC (assuming HMAC-MD5) and
- # Time Signed.
- return data[0:-32] + data[-26:-22] + data[-6:]
-
class MessageTest(unittest.TestCase):
def setUp(self):
+ # make sure we don't use faked time unless explicitly do so in tests
+ fix_current_time(None)
+
self.p = Message(Message.PARSE)
self.r = Message(Message.RENDER)
@@ -90,6 +96,10 @@ class MessageTest(unittest.TestCase):
self.tsig_key = TSIGKey("www.example.com:SFuWd/q99SzF8Yzd1QbB9g==")
self.tsig_ctx = TSIGContext(self.tsig_key)
+ def tearDown(self):
+ # reset any faked current time setting (it would affect other tests)
+ fix_current_time(None)
+
def test_init(self):
self.assertRaises(TypeError, Message, -1)
self.assertRaises(TypeError, Message, 3)
@@ -285,33 +295,112 @@ class MessageTest(unittest.TestCase):
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
- def __common_tsigquery_setup(self):
+ def __common_tsigmessage_setup(self, flags=[Message.HEADERFLAG_RD],
+ rrtype=RRType("A"), answer_data=None):
self.r.set_opcode(Opcode.QUERY())
self.r.set_rcode(Rcode.NOERROR())
- self.r.set_header_flag(Message.HEADERFLAG_RD)
+ for flag in flags:
+ self.r.set_header_flag(flag)
+ if answer_data is not None:
+ rrset = RRset(Name("www.example.com"), RRClass("IN"),
+ rrtype, RRTTL(86400))
+ for rdata in answer_data:
+ rrset.add_rdata(Rdata(rrtype, RRClass("IN"), rdata))
+ self.r.add_rrset(Message.SECTION_ANSWER, rrset)
self.r.add_question(Question(Name("www.example.com"),
- RRClass("IN"), RRType("A")))
+ RRClass("IN"), rrtype))
def __common_tsig_checks(self, expected_file):
renderer = MessageRenderer()
self.r.to_wire(renderer, self.tsig_ctx)
- actual_wire = strip_mutable_tsig_data(renderer.get_data())
- expected_wire = strip_mutable_tsig_data(read_wire_data(expected_file))
- self.assertEqual(expected_wire, actual_wire)
+ self.assertEqual(read_wire_data(expected_file), renderer.get_data())
def test_to_wire_with_tsig(self):
+ fix_current_time(0x4da8877a)
self.r.set_qid(0x2d65)
- self.__common_tsigquery_setup()
+ self.__common_tsigmessage_setup()
self.__common_tsig_checks("message_toWire2.wire")
def test_to_wire_with_edns_tsig(self):
+ fix_current_time(0x4db60d1f)
self.r.set_qid(0x6cd)
- self.__common_tsigquery_setup()
+ self.__common_tsigmessage_setup()
edns = EDNS()
edns.set_udp_size(4096)
self.r.set_edns(edns)
self.__common_tsig_checks("message_toWire3.wire")
+ def test_to_wire_tsig_truncation(self):
+ fix_current_time(0x4e179212)
+ data = factoryFromFile(self.p, "message_fromWire17.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0x22c2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT2])
+ self.__common_tsig_checks("message_toWire4.wire")
+
+ def test_to_wire_tsig_truncation2(self):
+ fix_current_time(0x4e179212)
+ data = factoryFromFile(self.p, "message_fromWire17.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0x22c2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT3])
+ self.__common_tsig_checks("message_toWire4.wire")
+
+ def test_to_wire_tsig_truncation3(self):
+ self.r.set_opcode(Opcode.QUERY())
+ self.r.set_rcode(Rcode.NOERROR())
+ for i in range(1, 68):
+ self.r.add_question(Question(Name("www.example.com"),
+ RRClass("IN"), RRType(i)))
+ renderer = MessageRenderer()
+ self.r.to_wire(renderer, self.tsig_ctx)
+
+ self.p.from_wire(renderer.get_data())
+ self.assertTrue(self.p.get_header_flag(Message.HEADERFLAG_TC))
+ self.assertEqual(66, self.p.get_rr_count(Message.SECTION_QUESTION))
+ self.assertNotEqual(None, self.p.get_tsig_record())
+
+ def test_to_wire_tsig_no_truncation(self):
+ fix_current_time(0x4e17b38d)
+ data = factoryFromFile(self.p, "message_fromWire18.wire")
+ self.assertEqual(TSIGError.NOERROR,
+ self.tsig_ctx.verify(self.p.get_tsig_record(), data))
+ self.r.set_qid(0xd6e2)
+ self.__common_tsigmessage_setup([Message.HEADERFLAG_QR,
+ Message.HEADERFLAG_AA,
+ Message.HEADERFLAG_RD],
+ RRType("TXT"),
+ [LONG_TXT1, LONG_TXT4])
+ self.__common_tsig_checks("message_toWire5.wire")
+
+ def test_to_wire_tsig_length_errors(self):
+ renderer = MessageRenderer()
+ renderer.set_length_limit(84) # 84 = expected TSIG length - 1
+ self.__common_tsigmessage_setup()
+ self.assertRaises(TSIGContextError,
+ self.r.to_wire, renderer, self.tsig_ctx)
+
+ renderer.clear()
+ self.r.clear(Message.RENDER)
+ renderer.set_length_limit(86) # 86 = expected TSIG length + 1
+ self.__common_tsigmessage_setup()
+ self.assertRaises(TSIGContextError,
+ self.r.to_wire, renderer, self.tsig_ctx)
+
+ # skip the last test of the corresponding C++ test: it requires
+ # subclassing MessageRenderer, which is (currently) not possible
+ # for python. In any case, it's very unlikely to happen in practice.
+
def test_to_text(self):
message_render = create_message()
@@ -377,6 +466,54 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual("192.0.2.2", rdata[1].to_text())
self.assertEqual(2, len(rdata))
+ def test_from_wire_short_buffer(self):
+ data = read_wire_data("message_fromWire22.wire")
+ self.assertRaises(DNSMessageFORMERR, self.p.from_wire, data[:-1])
+
+ def test_from_wire_combind_rrs(self):
+ factoryFromFile(self.p, "message_fromWire19.wire")
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ self.assertEqual(2, len(rrset.get_rdata()))
+
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ self.assertEqual(1, len(rrset.get_rdata()))
+
+ def check_preserve_rrs(self, message, section):
+ rrset = message.get_section(section)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('2001:db8::1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[2]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.2', rdata[0].to_text())
+
+ def test_from_wire_preserve_answer(self):
+ factoryFromFile(self.p, "message_fromWire19.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ANSWER)
+
+ def test_from_wire_preserve_authority(self):
+ factoryFromFile(self.p, "message_fromWire20.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_AUTHORITY)
+
+ def test_from_wire_preserve_additional(self):
+ factoryFromFile(self.p, "message_fromWire21.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ADDITIONAL)
+
def test_EDNS0ExtCode(self):
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
diff --git a/src/lib/dns/python/tests/question_python_test.py b/src/lib/dns/python/tests/question_python_test.py
index 69e3051..8c8c815 100644
--- a/src/lib/dns/python/tests/question_python_test.py
+++ b/src/lib/dns/python/tests/question_python_test.py
@@ -74,7 +74,6 @@ class QuestionTest(unittest.TestCase):
self.assertEqual("foo.example.com. IN NS\n", str(self.test_question1))
self.assertEqual("bar.example.com. CH A\n", self.test_question2.to_text())
-
def test_to_wire_buffer(self):
obuffer = bytes()
obuffer = self.test_question1.to_wire(obuffer)
@@ -82,7 +81,6 @@ class QuestionTest(unittest.TestCase):
wiredata = read_wire_data("question_toWire1")
self.assertEqual(obuffer, wiredata)
-
def test_to_wire_renderer(self):
renderer = MessageRenderer()
self.test_question1.to_wire(renderer)
@@ -91,5 +89,13 @@ class QuestionTest(unittest.TestCase):
self.assertEqual(renderer.get_data(), wiredata)
self.assertRaises(TypeError, self.test_question1.to_wire, 1)
+ def test_to_wire_truncated(self):
+ renderer = MessageRenderer()
+ renderer.set_length_limit(self.example_name1.get_length())
+ self.assertFalse(renderer.is_truncated())
+ self.test_question1.to_wire(renderer)
+ self.assertTrue(renderer.is_truncated())
+ self.assertEqual(0, renderer.get_length())
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tsig_python.cc b/src/lib/dns/python/tsig_python.cc
index db93a08..0764e33 100644
--- a/src/lib/dns/python/tsig_python.cc
+++ b/src/lib/dns/python/tsig_python.cc
@@ -37,23 +37,18 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIGContext
-//
-
-// Trivial constructor.
-s_TSIGContext::s_TSIGContext() : cppobj(NULL) {
-}
-
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGContext : public PyObject {
+public:
+ s_TSIGContext() : cppobj(NULL) {};
+ TSIGContext* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGContext, TSIGContext> TSIGContextContainer;
@@ -101,23 +96,23 @@ int
TSIGContext_init(s_TSIGContext* self, PyObject* args) {
try {
// "From key" constructor
- const s_TSIGKey* tsigkey_obj;
+ const PyObject* tsigkey_obj;
if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey_obj)) {
- self->cppobj = new TSIGContext(*tsigkey_obj->cppobj);
+ self->cppobj = new TSIGContext(PyTSIGKey_ToTSIGKey(tsigkey_obj));
return (0);
}
// "From key param + keyring" constructor
PyErr_Clear();
- const s_Name* keyname_obj;
- const s_Name* algname_obj;
- const s_TSIGKeyRing* keyring_obj;
+ const PyObject* keyname_obj;
+ const PyObject* algname_obj;
+ const PyObject* keyring_obj;
if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &keyname_obj,
&name_type, &algname_obj, &tsigkeyring_type,
&keyring_obj)) {
- self->cppobj = new TSIGContext(*keyname_obj->cppobj,
- *algname_obj->cppobj,
- *keyring_obj->cppobj);
+ self->cppobj = new TSIGContext(PyName_ToName(keyname_obj),
+ PyName_ToName(algname_obj),
+ PyTSIGKeyRing_ToTSIGKeyRing(keyring_obj));
return (0);
}
} catch (const exception& ex) {
@@ -153,7 +148,7 @@ PyObject*
TSIGContext_getError(s_TSIGContext* self) {
try {
PyObjectContainer container(createTSIGErrorObject(
- self->cppobj->getError()));
+ self->cppobj->getError()));
return (Py_BuildValue("O", container.get()));
} catch (const exception& ex) {
const string ex_what =
@@ -205,13 +200,13 @@ PyObject*
TSIGContext_verify(s_TSIGContext* self, PyObject* args) {
const char* data;
Py_ssize_t data_len;
- s_TSIGRecord* py_record;
+ PyObject* py_record;
PyObject* py_maybe_none;
- TSIGRecord* record;
+ const TSIGRecord* record;
if (PyArg_ParseTuple(args, "O!y#", &tsigrecord_type, &py_record,
&data, &data_len)) {
- record = py_record->cppobj;
+ record = &PyTSIGRecord_ToTSIGRecord(py_record);
} else if (PyArg_ParseTuple(args, "Oy#", &py_maybe_none, &data,
&data_len)) {
record = NULL;
@@ -264,7 +259,7 @@ PyTypeObject tsigcontext_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -307,58 +302,24 @@ PyTypeObject tsigcontext_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
bool
-initModulePart_TSIGContext(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigcontext_type) < 0) {
- return (false);
+PyTSIGContext_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- void* p = &tsigcontext_type;
- if (PyModule_AddObject(mod, "TSIGContext",
- static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsigcontext_type);
+ return (PyObject_TypeCheck(obj, &tsigcontext_type));
+}
- try {
- // Class specific exceptions
- po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
- po_IscException, NULL);
- PyObjectContainer(po_TSIGContextError).installToModule(
- mod, "TSIGContextError");
-
- // Constant class variables
- installClassVariable(tsigcontext_type, "STATE_INIT",
- Py_BuildValue("I", TSIGContext::INIT));
- installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
- Py_BuildValue("I", TSIGContext::SENT_REQUEST));
- installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
- Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
- installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
- Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
- installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
- Py_BuildValue("I",
- TSIGContext::VERIFIED_RESPONSE));
-
- installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
- Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGContext initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGContext initialization");
- return (false);
+TSIGContext&
+PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj) {
+ if (tsigcontext_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGContext PyObject conversion");
}
-
- return (true);
+ s_TSIGContext* tsigcontext = static_cast<s_TSIGContext*>(tsigcontext_obj);
+ return (*tsigcontext->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsig_python.h b/src/lib/dns/python/tsig_python.h
index f9b4f7b..e4e9fff 100644
--- a/src/lib/dns/python/tsig_python.h
+++ b/src/lib/dns/python/tsig_python.h
@@ -23,19 +23,31 @@ class TSIGContext;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGContext : public PyObject {
-public:
- s_TSIGContext();
- TSIGContext* cppobj;
-};
-
extern PyTypeObject tsigcontext_type;
// Class specific exceptions
extern PyObject* po_TSIGContextError;
-bool initModulePart_TSIGContext(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGContext object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGContext, false otherwise
+bool PyTSIGContext_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGContext object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGContext; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGContext_Check()
+///
+/// \note This is not a copy; if the TSIGContext is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigcontext_obj The tsigcontext object to convert
+TSIGContext& PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj);
+
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/tsig_rdata_python.cc b/src/lib/dns/python/tsig_rdata_python.cc
index 4e4f287..6ec0f09 100644
--- a/src/lib/dns/python/tsig_rdata_python.cc
+++ b/src/lib/dns/python/tsig_rdata_python.cc
@@ -12,6 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <string>
@@ -32,23 +33,19 @@ using namespace isc::dns;
using namespace isc::dns::rdata;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIG RDATA
-//
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIG : public PyObject {
+public:
+ s_TSIG() : cppobj(NULL) {};
+ const rdata::any::TSIG* cppobj;
+};
-// Trivial constructor.
-s_TSIG::s_TSIG() : cppobj(NULL) {
-}
-namespace {
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIG, any::TSIG> TSIGContainer;
@@ -235,7 +232,7 @@ TSIG_toWire(const s_TSIG* const self, PyObject* args) {
self, args));
}
-PyObject*
+PyObject*
TSIG_richcmp(const s_TSIG* const self,
const s_TSIG* const other,
const int op)
@@ -302,7 +299,7 @@ PyTypeObject tsig_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
TSIG_str, // tp_str
NULL, // tp_getattro
@@ -340,30 +337,31 @@ PyTypeObject tsig_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIG(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsig_type) < 0) {
- return (false);
- }
- void* p = &tsig_type;
- if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsig_type);
-
- return (true);
-}
-
PyObject*
createTSIGObject(const any::TSIG& source) {
- TSIGContainer container = PyObject_New(s_TSIG, &tsig_type);
+ TSIGContainer container(PyObject_New(s_TSIG, &tsig_type));
container.set(new any::TSIG(source));
return (container.release());
}
+
+bool
+PyTSIG_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+ }
+ return (PyObject_TypeCheck(obj, &tsig_type));
+}
+
+const any::TSIG&
+PyTSIG_ToTSIG(const PyObject* tsig_obj) {
+ if (tsig_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIG PyObject conversion");
+ }
+ const s_TSIG* tsig = static_cast<const s_TSIG*>(tsig_obj);
+ return (*tsig->cppobj);
+}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsig_rdata_python.h b/src/lib/dns/python/tsig_rdata_python.h
index e5e0c6c..a84d9e8 100644
--- a/src/lib/dns/python/tsig_rdata_python.h
+++ b/src/lib/dns/python/tsig_rdata_python.h
@@ -27,17 +27,8 @@ class TSIG;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIG : public PyObject {
-public:
- s_TSIG();
- const rdata::any::TSIG* cppobj;
-};
-
extern PyTypeObject tsig_type;
-bool initModulePart_TSIG(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIG object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -47,6 +38,26 @@ bool initModulePart_TSIG(PyObject* mod);
/// followed by necessary setup for python exception.
PyObject* createTSIGObject(const rdata::any::TSIG& source);
+/// \brief Checks if the given python object is a TSIG object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIG, false otherwise
+bool PyTSIG_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIG object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIG; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIG_Check()
+///
+/// \note This is not a copy; if the TSIG is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsig_obj The tsig object to convert
+const rdata::any::TSIG& PyTSIG_ToTSIG(const PyObject* tsig_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigerror_python.cc b/src/lib/dns/python/tsigerror_python.cc
index 0ad4716..7a0217e 100644
--- a/src/lib/dns/python/tsigerror_python.cc
+++ b/src/lib/dns/python/tsigerror_python.cc
@@ -30,26 +30,21 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
-//
-// TSIGError
-//
-
-// Trivial constructor.
-s_TSIGError::s_TSIGError() : cppobj(NULL) {
-}
-
// Import pydoc text
#include "tsigerror_python_inc.cc"
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGError : public PyObject {
+public:
+ s_TSIGError() : cppobj(NULL) {};
+ const TSIGError* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGError, TSIGError> TSIGErrorContainer;
@@ -107,9 +102,9 @@ TSIGError_init(s_TSIGError* self, PyObject* args) {
// Constructor from Rcode
PyErr_Clear();
- s_Rcode* py_rcode;
+ PyObject* py_rcode;
if (PyArg_ParseTuple(args, "O!", &rcode_type, &py_rcode)) {
- self->cppobj = new TSIGError(*py_rcode->cppobj);
+ self->cppobj = new TSIGError(PyRcode_ToRcode(py_rcode));
return (0);
}
} catch (const isc::OutOfRange& ex) {
@@ -172,13 +167,8 @@ TSIGError_str(PyObject* self) {
PyObject*
TSIGError_toRcode(const s_TSIGError* const self) {
- typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodePyObjectContainer;
-
try {
- RcodePyObjectContainer rcode_container(PyObject_New(s_Rcode,
- &rcode_type));
- rcode_container.set(new Rcode(self->cppobj->toRcode()));
- return (rcode_container.release());
+ return (createRcodeObject(self->cppobj->toRcode()));
} catch (const exception& ex) {
const string ex_what =
"Failed to convert TSIGError to Rcode: " + string(ex.what());
@@ -190,7 +180,7 @@ TSIGError_toRcode(const s_TSIGError* const self) {
return (NULL);
}
-PyObject*
+PyObject*
TSIGError_richcmp(const s_TSIGError* const self,
const s_TSIGError* const other,
const int op)
@@ -252,7 +242,7 @@ PyTypeObject tsigerror_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
// THIS MAY HAVE TO BE CHANGED TO NULL:
TSIGError_str, // tp_str
@@ -290,78 +280,9 @@ PyTypeObject tsigerror_type = {
0 // tp_version_tag
};
-namespace {
-// Trivial shortcut to create and install TSIGError constants.
-inline void
-installTSIGErrorConstant(const char* name, const TSIGError& val) {
- TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
- container.installAsClassVariable(tsigerror_type, name, new TSIGError(val));
-}
-}
-
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIGError(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigerror_type) < 0) {
- return (false);
- }
- void* p = &tsigerror_type;
- if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
- return (false);
- }
- Py_INCREF(&tsigerror_type);
-
- try {
- // Constant class variables
- // Error codes (bare values)
- installClassVariable(tsigerror_type, "BAD_SIG_CODE",
- Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
- installClassVariable(tsigerror_type, "BAD_KEY_CODE",
- Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
- installClassVariable(tsigerror_type, "BAD_TIME_CODE",
- Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
-
- // Error codes (constant objects)
- installTSIGErrorConstant("NOERROR", TSIGError::NOERROR());
- installTSIGErrorConstant("FORMERR", TSIGError::FORMERR());
- installTSIGErrorConstant("SERVFAIL", TSIGError::SERVFAIL());
- installTSIGErrorConstant("NXDOMAIN", TSIGError::NXDOMAIN());
- installTSIGErrorConstant("NOTIMP", TSIGError::NOTIMP());
- installTSIGErrorConstant("REFUSED", TSIGError::REFUSED());
- installTSIGErrorConstant("YXDOMAIN", TSIGError::YXDOMAIN());
- installTSIGErrorConstant("YXRRSET", TSIGError::YXRRSET());
- installTSIGErrorConstant("NXRRSET", TSIGError::NXRRSET());
- installTSIGErrorConstant("NOTAUTH", TSIGError::NOTAUTH());
- installTSIGErrorConstant("NOTZONE", TSIGError::NOTZONE());
- installTSIGErrorConstant("RESERVED11", TSIGError::RESERVED11());
- installTSIGErrorConstant("RESERVED12", TSIGError::RESERVED12());
- installTSIGErrorConstant("RESERVED13", TSIGError::RESERVED13());
- installTSIGErrorConstant("RESERVED14", TSIGError::RESERVED14());
- installTSIGErrorConstant("RESERVED15", TSIGError::RESERVED15());
- installTSIGErrorConstant("BAD_SIG", TSIGError::BAD_SIG());
- installTSIGErrorConstant("BAD_KEY", TSIGError::BAD_KEY());
- installTSIGErrorConstant("BAD_TIME", TSIGError::BAD_TIME());
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGError initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGError initialization");
- return (false);
- }
-
- return (true);
-}
-
PyObject*
createTSIGErrorObject(const TSIGError& source) {
- TSIGErrorContainer container = PyObject_New(s_TSIGError, &tsigerror_type);
+ TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
container.set(new TSIGError(source));
return (container.release());
}
diff --git a/src/lib/dns/python/tsigerror_python.h b/src/lib/dns/python/tsigerror_python.h
index 735a480..0b5b630 100644
--- a/src/lib/dns/python/tsigerror_python.h
+++ b/src/lib/dns/python/tsigerror_python.h
@@ -23,17 +23,8 @@ class TSIGError;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGError : public PyObject {
-public:
- s_TSIGError();
- const TSIGError* cppobj;
-};
-
extern PyTypeObject tsigerror_type;
-bool initModulePart_TSIGError(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIGError object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -42,6 +33,7 @@ bool initModulePart_TSIGError(PyObject* mod);
/// This function is expected to be called with in a try block
/// followed by necessary setup for python exception.
PyObject* createTSIGErrorObject(const TSIGError& source);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.cc b/src/lib/dns/python/tsigkey_python.cc
index f0906cb..cf79c1a 100644
--- a/src/lib/dns/python/tsigkey_python.cc
+++ b/src/lib/dns/python/tsigkey_python.cc
@@ -31,10 +31,6 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
@@ -43,11 +39,14 @@ using namespace isc::dns::python;
// TSIGKey
//
+namespace {
// The s_* Class simply covers one instantiation of the object
+class s_TSIGKey : public PyObject {
+public:
+ s_TSIGKey() : cppobj(NULL) {};
+ TSIGKey* cppobj;
+};
-s_TSIGKey::s_TSIGKey() : cppobj(NULL) {}
-
-namespace {
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
@@ -96,8 +95,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
}
PyErr_Clear();
- const s_Name* key_name;
- const s_Name* algorithm_name;
+ const PyObject* key_name;
+ const PyObject* algorithm_name;
PyObject* bytes_obj;
const char* secret;
Py_ssize_t secret_len;
@@ -107,8 +106,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
if (secret_len == 0) {
secret = NULL;
}
- self->cppobj = new TSIGKey(*key_name->cppobj,
- *algorithm_name->cppobj,
+ self->cppobj = new TSIGKey(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name),
secret, secret_len);
return (0);
}
@@ -196,7 +195,7 @@ PyTypeObject tsigkey_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -233,49 +232,20 @@ PyTypeObject tsigkey_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
bool
-initModulePart_TSIGKey(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigkey_type) < 0) {
- return (false);
- }
- void* p = &tsigkey_type;
- if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
- return (false);
- }
- Py_INCREF(&tsigkey_type);
-
- try {
- // Constant class variables
- installClassVariable(tsigkey_type, "HMACMD5_NAME",
- createNameObject(TSIGKey::HMACMD5_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA1_NAME",
- createNameObject(TSIGKey::HMACSHA1_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA256_NAME",
- createNameObject(TSIGKey::HMACSHA256_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA224_NAME",
- createNameObject(TSIGKey::HMACSHA224_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA384_NAME",
- createNameObject(TSIGKey::HMACSHA384_NAME()));
- installClassVariable(tsigkey_type, "HMACSHA512_NAME",
- createNameObject(TSIGKey::HMACSHA512_NAME()));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGKey initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGKey initialization");
- return (false);
+PyTSIGKey_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
+ return (PyObject_TypeCheck(obj, &tsigkey_type));
+}
- return (true);
+const TSIGKey&
+PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj) {
+ const s_TSIGKey* tsigkey = static_cast<const s_TSIGKey*>(tsigkey_obj);
+ return (*tsigkey->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
@@ -287,13 +257,14 @@ initModulePart_TSIGKey(PyObject* mod) {
// TSIGKeyRing
//
+namespace {
// The s_* Class simply covers one instantiation of the object
+class s_TSIGKeyRing : public PyObject {
+public:
+ s_TSIGKeyRing() : cppobj(NULL) {};
+ TSIGKeyRing* cppobj;
+};
-// The s_* Class simply covers one instantiation of the object
-
-s_TSIGKeyRing::s_TSIGKeyRing() : cppobj(NULL) {}
-
-namespace {
//
// We declare the functions here, the definitions are below
// the type definition of the object, since both can use the other
@@ -329,7 +300,7 @@ TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
"Invalid arguments to TSIGKeyRing constructor");
return (-1);
}
-
+
self->cppobj = new(nothrow) TSIGKeyRing();
if (self->cppobj == NULL) {
PyErr_SetString(po_IscException, "Allocating TSIGKeyRing failed");
@@ -354,7 +325,7 @@ TSIGKeyRing_size(const s_TSIGKeyRing* const self) {
PyObject*
TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
s_TSIGKey* tsigkey;
-
+
if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey)) {
try {
const TSIGKeyRing::Result result =
@@ -374,11 +345,11 @@ TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
PyObject*
TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
+ PyObject* key_name;
if (PyArg_ParseTuple(args, "O!", &name_type, &key_name)) {
const TSIGKeyRing::Result result =
- self->cppobj->remove(*key_name->cppobj);
+ self->cppobj->remove(PyName_ToName(key_name));
return (Py_BuildValue("I", result));
}
@@ -390,13 +361,14 @@ TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
PyObject*
TSIGKeyRing_find(const s_TSIGKeyRing* self, PyObject* args) {
- s_Name* key_name;
- s_Name* algorithm_name;
+ PyObject* key_name;
+ PyObject* algorithm_name;
if (PyArg_ParseTuple(args, "O!O!", &name_type, &key_name,
&name_type, &algorithm_name)) {
const TSIGKeyRing::FindResult result =
- self->cppobj->find(*key_name->cppobj, *algorithm_name->cppobj);
+ self->cppobj->find(PyName_ToName(key_name),
+ PyName_ToName(algorithm_name));
if (result.key != NULL) {
s_TSIGKey* key = PyObject_New(s_TSIGKey, &tsigkey_type);
if (key == NULL) {
@@ -436,7 +408,7 @@ PyTypeObject tsigkeyring_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
NULL, // tp_str
NULL, // tp_getattro
@@ -473,27 +445,24 @@ PyTypeObject tsigkeyring_type = {
};
bool
-initModulePart_TSIGKeyRing(PyObject* mod) {
- if (PyType_Ready(&tsigkeyring_type) < 0) {
- return (false);
+PyTSIGKeyRing_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&tsigkeyring_type);
- void* p = &tsigkeyring_type;
- if (PyModule_AddObject(mod, "TSIGKeyRing",
- static_cast<PyObject*>(p)) != 0) {
- Py_DECREF(&tsigkeyring_type);
- return (false);
- }
-
- addClassVariable(tsigkeyring_type, "SUCCESS",
- Py_BuildValue("I", TSIGKeyRing::SUCCESS));
- addClassVariable(tsigkeyring_type, "EXIST",
- Py_BuildValue("I", TSIGKeyRing::EXIST));
- addClassVariable(tsigkeyring_type, "NOTFOUND",
- Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+ return (PyObject_TypeCheck(obj, &tsigkeyring_type));
+}
- return (true);
+const TSIGKeyRing&
+PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj) {
+ if (tsigkeyring_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGKeyRing PyObject conversion");
+ }
+ const s_TSIGKeyRing* tsigkeyring =
+ static_cast<const s_TSIGKeyRing*>(tsigkeyring_obj);
+ return (*tsigkeyring->cppobj);
}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.h b/src/lib/dns/python/tsigkey_python.h
index 51b3ae7..6c3d2e3 100644
--- a/src/lib/dns/python/tsigkey_python.h
+++ b/src/lib/dns/python/tsigkey_python.h
@@ -24,24 +24,46 @@ class TSIGKeyRing;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGKey : public PyObject {
-public:
- s_TSIGKey();
- TSIGKey* cppobj;
-};
-
-class s_TSIGKeyRing : public PyObject {
-public:
- s_TSIGKeyRing();
- TSIGKeyRing* cppobj;
-};
-
extern PyTypeObject tsigkey_type;
extern PyTypeObject tsigkeyring_type;
-bool initModulePart_TSIGKey(PyObject* mod);
-bool initModulePart_TSIGKeyRing(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGKey object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKey, false otherwise
+bool PyTSIGKey_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKey object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKey; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKey_Check()
+///
+/// \note This is not a copy; if the TSIGKey is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkey_obj The tsigkey object to convert
+const TSIGKey& PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj);
+
+/// \brief Checks if the given python object is a TSIGKeyRing object
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKeyRing, false otherwise
+bool PyTSIGKeyRing_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKeyRing object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGKeyRing; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGKeyRing_Check()
+///
+/// \note This is not a copy; if the TSIGKeyRing is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkeyring_obj The tsigkeyring object to convert
+const TSIGKeyRing& PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj);
} // namespace python
} // namespace dns
diff --git a/src/lib/dns/python/tsigrecord_python.cc b/src/lib/dns/python/tsigrecord_python.cc
index 8a78b5e..c754dd2 100644
--- a/src/lib/dns/python/tsigrecord_python.cc
+++ b/src/lib/dns/python/tsigrecord_python.cc
@@ -32,10 +32,6 @@ using namespace isc::util::python;
using namespace isc::dns;
using namespace isc::dns::python;
-//
-// Definition of the classes
-//
-
// For each class, we need a struct, a helper functions (init, destroy,
// and static wrappers around the methods we export), a list of methods,
// and a type description
@@ -44,11 +40,14 @@ using namespace isc::dns::python;
// TSIGRecord
//
-// Trivial constructor.
-s_TSIGRecord::s_TSIGRecord() : cppobj(NULL) {
-}
-
namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGRecord : public PyObject {
+public:
+ s_TSIGRecord() : cppobj(NULL) {};
+ TSIGRecord* cppobj;
+};
+
// Shortcut type which would be convenient for adding class variables safely.
typedef CPPPyObjectContainer<s_TSIGRecord, TSIGRecord> TSIGRecordContainer;
@@ -102,11 +101,12 @@ PyMethodDef TSIGRecord_methods[] = {
int
TSIGRecord_init(s_TSIGRecord* self, PyObject* args) {
try {
- const s_Name* py_name;
- const s_TSIG* py_tsig;
+ const PyObject* py_name;
+ const PyObject* py_tsig;
if (PyArg_ParseTuple(args, "O!O!", &name_type, &py_name,
&tsig_type, &py_tsig)) {
- self->cppobj = new TSIGRecord(*py_name->cppobj, *py_tsig->cppobj);
+ self->cppobj = new TSIGRecord(PyName_ToName(py_name),
+ PyTSIG_ToTSIG(py_tsig));
return (0);
}
} catch (const exception& ex) {
@@ -226,7 +226,7 @@ PyTypeObject tsigrecord_type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
TSIGRecord_str, // tp_str
NULL, // tp_getattro
@@ -262,50 +262,32 @@ PyTypeObject tsigrecord_type = {
0 // tp_version_tag
};
-// Module Initialization, all statics are initialized here
+PyObject*
+createTSIGRecordObject(const TSIGRecord& source) {
+ TSIGRecordContainer container(PyObject_New(s_TSIGRecord, &tsigrecord_type));
+ container.set(new TSIGRecord(source));
+ return (container.release());
+}
+
bool
-initModulePart_TSIGRecord(PyObject* mod) {
- // We initialize the static description object with PyType_Ready(),
- // then add it to the module. This is not just a check! (leaving
- // this out results in segmentation faults)
- if (PyType_Ready(&tsigrecord_type) < 0) {
- return (false);
- }
- void* p = &tsigrecord_type;
- if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
- return (false);
+PyTSIGRecord_Check(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
}
- Py_INCREF(&tsigrecord_type);
+ return (PyObject_TypeCheck(obj, &tsigrecord_type));
+}
- // The following template is the typical procedure for installing class
- // variables. If the class doesn't have a class variable, remove the
- // entire try-catch clauses.
- try {
- // Constant class variables
- installClassVariable(tsigrecord_type, "TSIG_TTL",
- Py_BuildValue("I", 0));
- } catch (const exception& ex) {
- const string ex_what =
- "Unexpected failure in TSIGRecord initialization: " +
- string(ex.what());
- PyErr_SetString(po_IscException, ex_what.c_str());
- return (false);
- } catch (...) {
- PyErr_SetString(PyExc_SystemError,
- "Unexpected failure in TSIGRecord initialization");
- return (false);
+const TSIGRecord&
+PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj) {
+ if (tsigrecord_obj == NULL) {
+ isc_throw(PyCPPWrapperException,
+ "obj argument NULL in TSIGRecord PyObject conversion");
}
-
- return (true);
+ s_TSIGRecord* tsigrecord = static_cast<s_TSIGRecord*>(tsigrecord_obj);
+ return (*tsigrecord->cppobj);
}
-PyObject*
-createTSIGRecordObject(const TSIGRecord& source) {
- TSIGRecordContainer container = PyObject_New(s_TSIGRecord,
- &tsigrecord_type);
- container.set(new TSIGRecord(source));
- return (container.release());
-}
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/python/tsigrecord_python.h b/src/lib/dns/python/tsigrecord_python.h
index e0a3526..d6252e1 100644
--- a/src/lib/dns/python/tsigrecord_python.h
+++ b/src/lib/dns/python/tsigrecord_python.h
@@ -23,17 +23,9 @@ class TSIGRecord;
namespace python {
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGRecord : public PyObject {
-public:
- s_TSIGRecord();
- TSIGRecord* cppobj;
-};
extern PyTypeObject tsigrecord_type;
-bool initModulePart_TSIGRecord(PyObject* mod);
-
/// This is A simple shortcut to create a python TSIGRecord object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
@@ -43,6 +35,26 @@ bool initModulePart_TSIGRecord(PyObject* mod);
/// followed by necessary setup for python exception.
PyObject* createTSIGRecordObject(const TSIGRecord& source);
+/// \brief Checks if the given python object is a TSIGRecord object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGRecord, false otherwise
+bool PyTSIGRecord_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGRecord object contained within the given
+/// Python object.
+///
+/// \note The given object MUST be of type TSIGRecord; this can be checked with
+/// either the right call to ParseTuple("O!"), or with PyTSIGRecord_Check()
+///
+/// \note This is not a copy; if the TSIGRecord is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigrecord_obj The tsigrecord object to convert
+const TSIGRecord& PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj);
+
} // namespace python
} // namespace dns
} // namespace isc
diff --git a/src/lib/dns/question.cc b/src/lib/dns/question.cc
index 96e2a9c..6ccb164 100644
--- a/src/lib/dns/question.cc
+++ b/src/lib/dns/question.cc
@@ -57,10 +57,19 @@ Question::toWire(OutputBuffer& buffer) const {
unsigned int
Question::toWire(AbstractMessageRenderer& renderer) const {
+ const size_t pos0 = renderer.getLength();
+
renderer.writeName(name_);
rrtype_.toWire(renderer);
rrclass_.toWire(renderer);
+    // Make sure the renderer has room for the question
+ if (renderer.getLength() > renderer.getLengthLimit()) {
+ renderer.trim(renderer.getLength() - pos0);
+ renderer.setTruncated();
+ return (0);
+ }
+
return (1); // number of "entries"
}
diff --git a/src/lib/dns/question.h b/src/lib/dns/question.h
index b3f3d98..5d2783b 100644
--- a/src/lib/dns/question.h
+++ b/src/lib/dns/question.h
@@ -201,23 +201,23 @@ public:
/// class description).
///
/// The owner name will be compressed if possible, although it's an
- /// unlikely event in practice because the %Question section a DNS
+ /// unlikely event in practice because the Question section a DNS
/// message normally doesn't contain multiple question entries and
/// it's located right after the Header section.
/// Nevertheless, \c renderer records the information of the owner name
/// so that it can be pointed by other RRs in other sections (which is
/// more likely to happen).
///
- /// In theory, an attempt to render a Question may cause truncation
- /// (when the Question section contains a large number of entries),
- /// but this implementation doesn't catch that situation.
- /// It would make the code unnecessarily complicated (though perhaps
- /// slightly) for almost impossible case in practice.
- /// An upper layer will handle the pathological case as a general error.
+ /// It could be possible, though very rare in practice, that
+ /// an attempt to render a Question may cause truncation
+ /// (when the Question section contains a large number of entries).
+    /// In such a case this method avoids the rendering and indicates the
+ /// truncation in the \c renderer. This method returns 0 in this case.
///
/// \param renderer DNS message rendering context that encapsulates the
/// output buffer and name compression information.
- /// \return 1
+ ///
+ /// \return 1 on success; 0 if it causes truncation
unsigned int toWire(AbstractMessageRenderer& renderer) const;
/// \brief Render the Question in the wire format without name compression.
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 2557965..4eb72bc 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -19,9 +19,11 @@
#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
+#include <util/strutil.h>
#include <util/encode/base64.h>
#include <dns/messagerenderer.h>
+#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
#include <dns/tsigerror.h>
@@ -30,6 +32,7 @@ using namespace std;
using namespace boost;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::util::str;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -65,45 +68,6 @@ struct TSIG::TSIGImpl {
const vector<uint8_t> other_data_;
};
-namespace {
-string
-getToken(istringstream& iss, const string& full_input) {
- string token;
- iss >> token;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid TSIG text: parse error " <<
- full_input);
- }
- return (token);
-}
-
-// This helper function converts a string token to an *unsigned* integer.
-// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
-// wide to store resulting integers.
-// BitSize is the maximum number of bits that the resulting integer can take.
-// This function first checks whether the given token can be converted to
-// an integer of NumType type. It then confirms the conversion result is
-// within the valid range, i.e., [0, 2^NumType - 1]. The second check is
-// necessary because lexical_cast<T> where T is an unsigned integer type
-// doesn't correctly reject negative numbers when compiled with SunStudio.
-template <typename NumType, int BitSize>
-NumType
-tokenToNum(const string& num_token) {
- NumType num;
- try {
- num = lexical_cast<NumType>(num_token);
- } catch (const boost::bad_lexical_cast& ex) {
- isc_throw(InvalidRdataText, "Invalid TSIG numeric parameter: " <<
- num_token);
- }
- if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
- isc_throw(InvalidRdataText, "Numeric TSIG parameter out of range: " <<
- num);
- }
- return (num);
-}
-}
-
/// \brief Constructor from string.
///
/// \c tsig_str must be formatted as follows:
@@ -148,47 +112,52 @@ tokenToNum(const string& num_token) {
TSIG::TSIG(const std::string& tsig_str) : impl_(NULL) {
istringstream iss(tsig_str);
- const Name algorithm(getToken(iss, tsig_str));
- const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss,
- tsig_str));
- const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
- const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
- const string mac_txt = (macsize > 0) ? getToken(iss, tsig_str) : "";
- vector<uint8_t> mac;
- decodeBase64(mac_txt, mac);
- if (mac.size() != macsize) {
- isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
- }
-
- const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
- const string error_txt = getToken(iss, tsig_str);
- int32_t error = 0;
- // XXX: In the initial implementation we hardcode the mnemonics.
- // We'll soon generalize this.
- if (error_txt == "BADSIG") {
- error = 16;
- } else if (error_txt == "BADKEY") {
- error = 17;
- } else if (error_txt == "BADTIME") {
- error = 18;
- } else {
- error = tokenToNum<int32_t, 16>(error_txt);
- }
-
- const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
- const string otherdata_txt = (otherlen > 0) ? getToken(iss, tsig_str) : "";
- vector<uint8_t> other_data;
- decodeBase64(otherdata_txt, other_data);
-
- if (!iss.eof()) {
- isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
- tsig_str);
+ try {
+ const Name algorithm(getToken(iss));
+ const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss));
+ const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss));
+
+ const string mac_txt = (macsize > 0) ? getToken(iss) : "";
+ vector<uint8_t> mac;
+ decodeBase64(mac_txt, mac);
+ if (mac.size() != macsize) {
+ isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
+ }
+
+ const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss));
+
+ const string error_txt = getToken(iss);
+ int32_t error = 0;
+ // XXX: In the initial implementation we hardcode the mnemonics.
+ // We'll soon generalize this.
+ if (error_txt == "BADSIG") {
+ error = 16;
+ } else if (error_txt == "BADKEY") {
+ error = 17;
+ } else if (error_txt == "BADTIME") {
+ error = 18;
+ } else {
+ error = tokenToNum<int32_t, 16>(error_txt);
+ }
+
+ const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss));
+ const string otherdata_txt = (otherlen > 0) ? getToken(iss) : "";
+ vector<uint8_t> other_data;
+ decodeBase64(otherdata_txt, other_data);
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
+ tsig_str);
+ }
+
+ impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
+ error, other_data);
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid TSIG text: " << ste.what() <<
+ ": " << tsig_str);
}
-
- impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
- error, other_data);
}
/// \brief Constructor from wire-format data.
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..6afc4de
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where the server name field must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+ subtype_(0), server_(Name::ROOT_NAME())
+{
+ istringstream iss(afsdb_str);
+
+ try {
+ const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name servername(getToken(iss));
+ string server;
+
+ if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for AFSDB "
+                      "RDATA: " << afsdb_str);
+ }
+
+ subtype_ = subtype;
+ server_ = servername;
+
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+ ste.what() << ": " << afsdb_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// name in the wire data is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+ subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+ Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+ subtype_ = source.subtype_;
+ server_ = source.server_;
+
+ return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&))).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+ return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(subtype_);
+ server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(subtype_);
+ renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+ const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+ if (subtype_ < other_afsdb.subtype_) {
+ return (-1);
+ } else if (subtype_ > other_afsdb.subtype_) {
+ return (1);
+ }
+
+ return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+ return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+ return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// This method never throws an exception.
+ AFSDB& operator=(const AFSDB& source);
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the server field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal server name.
+ ///
+ /// This method never throws an exception.
+ const Name& getServer() const;
+
+ /// \brief Return the value of the subtype field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getSubtype() const;
+
+private:
+ uint16_t subtype_;
+ Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/ds_like.h b/src/lib/dns/rdata/generic/detail/ds_like.h
new file mode 100644
index 0000000..b5a35cd
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/ds_like.h
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DS_LIKE_H
+#define __DS_LIKE_H 1
+
+#include <stdint.h>
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace generic {
+namespace detail {
+
+/// \brief \c rdata::DSLikeImpl class represents the DS-like RDATA for DS
+/// and DLV types.
+///
+/// This class implements the basic interfaces inherited by the DS and DLV
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to DS-like RDATA.
+template <class Type, uint16_t typeCode> class DSLikeImpl {
+ // Common sequence of toWire() operations used for the two versions of
+ // toWire().
+ template <typename Output>
+ void
+ toWireCommon(Output& output) const {
+ output.writeUint16(tag_);
+ output.writeUint8(algorithm_);
+ output.writeUint8(digest_type_);
+ output.writeData(&digest_[0], digest_.size());
+ }
+
+public:
+ /// \brief Constructor from string.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataText is thrown if the method cannot process the
+ /// parameter data for any of the number of reasons.
+ DSLikeImpl(const std::string& ds_str) {
+ std::istringstream iss(ds_str);
+ // peekc should be of iss's char_type for isspace to work
+ std::istringstream::char_type peekc;
+ std::stringbuf digestbuf;
+ uint32_t tag, algorithm, digest_type;
+
+ iss >> tag >> algorithm >> digest_type;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText,
+ "Invalid " << RRType(typeCode) << " text");
+ }
+ if (tag > 0xffff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " tag out of range");
+ }
+ if (algorithm > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " algorithm out of range");
+ }
+ if (digest_type > 0xff) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " digest type out of range");
+ }
+
+ iss.read(&peekc, 1);
+ if (!iss.good() || !isspace(peekc, iss.getloc())) {
+ isc_throw(InvalidRdataText,
+ RRType(typeCode) << " presentation format error");
+ }
+
+ iss >> &digestbuf;
+
+ tag_ = tag;
+ algorithm_ = algorithm;
+ digest_type_ = digest_type;
+ decodeHex(digestbuf.str(), digest_);
+ }
+
+ /// \brief Constructor from wire-format data.
+ ///
+ /// \param buffer A buffer storing the wire format data.
+ /// \param rdata_len The length of the RDATA in bytes, normally expected
+ /// to be the value of the RDLENGTH field of the corresponding RR.
+ ///
+ /// <b>Exceptions</b>
+ ///
+ /// \c InvalidRdataLength is thrown if the input data is too short for the
+ /// type.
+ DSLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 4) {
+ isc_throw(InvalidRdataLength, RRType(typeCode) << " too short");
+ }
+
+ tag_ = buffer.readUint16();
+ algorithm_ = buffer.readUint8();
+ digest_type_ = buffer.readUint8();
+
+ rdata_len -= 4;
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+ }
+
+ /// \brief The copy constructor.
+ ///
+ /// Trivial for now, we could've used the default one.
+ DSLikeImpl(const DSLikeImpl& source) {
+ digest_ = source.digest_;
+ tag_ = source.tag_;
+ algorithm_ = source.algorithm_;
+ digest_type_ = source.digest_type_;
+ }
+
+ /// \brief Convert the DS-like data to a string.
+ ///
+ /// \return A \c string object that represents the DS-like data.
+ std::string
+ toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(static_cast<int>(tag_)) +
+ " " + lexical_cast<string>(static_cast<int>(algorithm_)) +
+ " " + lexical_cast<string>(static_cast<int>(digest_type_)) +
+ " " + encodeHex(digest_));
+ }
+
+ /// \brief Render the DS-like data in the wire format to an OutputBuffer
+ /// object.
+ ///
+ /// \param buffer An output buffer to store the wire data.
+ void
+ toWire(OutputBuffer& buffer) const {
+ toWireCommon(buffer);
+ }
+
+ /// \brief Render the DS-like data in the wire format to an
+ /// AbstractMessageRenderer object.
+ ///
+ /// \param renderer A renderer object to send the wire data to.
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ toWireCommon(renderer);
+ }
+
+ /// \brief Compare two instances of DS-like RDATA.
+ ///
+ /// It is up to the caller to make sure that \c other is an object of the
+ /// same \c DSLikeImpl class.
+ ///
+ /// \param other the right-hand operand to compare against.
+ /// \return < 0 if \c this would be sorted before \c other.
+ /// \return 0 if \c this is identical to \c other in terms of sorting
+ /// order.
+ /// \return > 0 if \c this would be sorted after \c other.
+ int
+ compare(const DSLikeImpl& other_ds) const {
+ if (tag_ != other_ds.tag_) {
+ return (tag_ < other_ds.tag_ ? -1 : 1);
+ }
+ if (algorithm_ != other_ds.algorithm_) {
+ return (algorithm_ < other_ds.algorithm_ ? -1 : 1);
+ }
+ if (digest_type_ != other_ds.digest_type_) {
+ return (digest_type_ < other_ds.digest_type_ ? -1 : 1);
+ }
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_ds.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_ds.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len)
+ ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+ /// \brief Accessors
+ uint16_t
+ getTag() const {
+ return (tag_);
+ }
+
+private:
+ // straightforward representation of DS RDATA fields
+ uint16_t tag_;
+ uint8_t algorithm_;
+ uint8_t digest_type_;
+ std::vector<uint8_t> digest_;
+};
+
+}
+}
+}
+}
+}
+#endif // __DS_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/txt_like.h b/src/lib/dns/rdata/generic/detail/txt_like.h
new file mode 100644
index 0000000..392a8ce
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/txt_like.h
@@ -0,0 +1,172 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __TXT_LIKE_H
+#define __TXT_LIKE_H 1
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+using namespace std;
+using namespace isc::util;
+
+template<class Type, uint16_t typeCode>class TXTLikeImpl {
+public:
+ TXTLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len > MAX_RDLENGTH) {
+ isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
+ }
+
+ if (rdata_len == 0) { // note that this couldn't happen in the loop.
+ isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+ RRType(typeCode) << " RDATA: 0-length character string");
+ }
+
+ do {
+ const uint8_t len = buffer.readUint8();
+ if (rdata_len < len + 1) {
+ isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+ RRType(typeCode) <<
+ " RDATA: character string length is too large: " <<
+ static_cast<int>(len));
+ }
+ vector<uint8_t> data(len + 1);
+ data[0] = len;
+ buffer.readData(&data[0] + 1, len);
+ string_list_.push_back(data);
+
+ rdata_len -= (len + 1);
+ } while (rdata_len > 0);
+ }
+
+ explicit TXTLikeImpl(const std::string& txtstr) {
+ // TBD: this is a simple, incomplete implementation that only supports
+ // a single character-string.
+
+ size_t length = txtstr.size();
+ size_t pos_begin = 0;
+
+ if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
+ pos_begin = 1;
+ length -= 2;
+ }
+
+ if (length > MAX_CHARSTRING_LEN) {
+ isc_throw(CharStringTooLong, RRType(typeCode) <<
+ " RDATA construction from text:"
+ " string length is too long: " << length);
+ }
+
+ // TBD: right now, we don't support escaped characters
+ if (txtstr.find('\\') != string::npos) {
+ isc_throw(InvalidRdataText, RRType(typeCode) <<
+ " RDATA from text:"
+ " escaped character is currently not supported: " <<
+ txtstr);
+ }
+
+ vector<uint8_t> data;
+ data.reserve(length + 1);
+ data.push_back(length);
+ data.insert(data.end(), txtstr.begin() + pos_begin,
+ txtstr.begin() + pos_begin + length);
+ string_list_.push_back(data);
+ }
+
+ TXTLikeImpl(const TXTLikeImpl& other) :
+ string_list_(other.string_list_)
+ {}
+
+ void
+ toWire(OutputBuffer& buffer) const {
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ buffer.writeData(&(*it)[0], (*it).size());
+ }
+ }
+
+ void
+ toWire(AbstractMessageRenderer& renderer) const {
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ renderer.writeData(&(*it)[0], (*it).size());
+ }
+ }
+
+ string
+ toText() const {
+ string s;
+
+ // XXX: this implementation is not entirely correct. for example, it
+ // should escape double-quotes if they appear in the character string.
+ for (vector<vector<uint8_t> >::const_iterator it =
+ string_list_.begin();
+ it != string_list_.end();
+ ++it)
+ {
+ if (!s.empty()) {
+ s.push_back(' ');
+ }
+ s.push_back('"');
+ s.insert(s.end(), (*it).begin() + 1, (*it).end());
+ s.push_back('"');
+ }
+
+ return (s);
+ }
+
+ int
+ compare(const TXTLikeImpl& other) const {
+ // This implementation is not efficient. Revisit this (TBD).
+ OutputBuffer this_buffer(0);
+ toWire(this_buffer);
+ size_t this_len = this_buffer.getLength();
+
+ OutputBuffer other_buffer(0);
+ other.toWire(other_buffer);
+ const size_t other_len = other_buffer.getLength();
+
+ const size_t cmplen = min(this_len, other_len);
+ const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
+ cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 :
+ (this_len < other_len) ? -1 : 1);
+ }
+ }
+
+private:
+ /// Note: this is a prototype version; we may reconsider
+ /// this representation later.
+ std::vector<std::vector<uint8_t> > string_list_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+
+#endif // __TXT_LIKE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
new file mode 100644
index 0000000..9887aa8
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <dns/rdata/generic/detail/ds_like.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const string& ds_str) :
+ impl_(new DLVImpl(ds_str))
+{}
+
+/// \brief Constructor from wire-format data.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DLVImpl(buffer, rdata_len))
+{}
+
+/// \brief Copy constructor
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const DLV& source) :
+ Rdata(), impl_(new DLVImpl(*source.impl_))
+{}
+
+/// \brief Assignment operator
+///
+/// PIMPL-induced logic
+DLV&
+DLV::operator=(const DLV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ DLVImpl* newimpl = new DLVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+/// \brief Destructor
+///
+/// Deallocates an internal resource.
+DLV::~DLV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c DLV to a string.
+///
+/// A pass-thru to the corresponding implementation method.
+string
+DLV::toText() const {
+ return (impl_->toText());
+}
+
+/// \brief Render the \c DLV in the wire format to a OutputBuffer object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+/// \brief Render the \c DLV in the wire format to a AbstractMessageRenderer
+/// object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+/// \brief Compare two instances of \c DLV RDATA.
+///
+/// The type check is performed here. Otherwise, a pass-thru to the
+/// corresponding implementation method.
+int
+DLV::compare(const Rdata& other) const {
+ const DLV& other_ds = dynamic_cast<const DLV&>(other);
+
+ return (impl_->compare(*other_ds.impl_));
+}
+
+/// \brief Tag accessor
+uint16_t
+DLV::getTag() const {
+ return (impl_->getTag());
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/dlv_32769.h b/src/lib/dns/rdata/generic/dlv_32769.h
new file mode 100644
index 0000000..86cd98c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+
+/// \brief \c rdata::generic::DLV class represents the DLV RDATA as defined in
+/// RFC4431.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DLV RDATA.
+class DLV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ DLV& operator=(const DLV& source);
+
+ /// \brief The destructor.
+ ~DLV();
+
+ /// \brief Return the value of the Tag field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getTag() const;
+private:
+ typedef detail::DSLikeImpl<DLV, 32769> DLVImpl;
+ DLVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 1b48456..20b62dc 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,87 +12,32 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <iostream>
#include <string>
-#include <sstream>
-#include <vector>
-
-#include <boost/lexical_cast.hpp>
#include <util/buffer.h>
#include <util/encode/hex.h>
#include <dns/messagerenderer.h>
-#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
-#include <stdio.h>
-#include <time.h>
+#include <dns/rdata/generic/detail/ds_like.h>
using namespace std;
using namespace isc::util;
using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-struct DSImpl {
- // straightforward representation of DS RDATA fields
- DSImpl(uint16_t tag, uint8_t algorithm, uint8_t digest_type,
- const vector<uint8_t>& digest) :
- tag_(tag), algorithm_(algorithm), digest_type_(digest_type),
- digest_(digest)
- {}
-
- uint16_t tag_;
- uint8_t algorithm_;
- uint8_t digest_type_;
- const vector<uint8_t> digest_;
-};
-
DS::DS(const string& ds_str) :
- impl_(NULL)
-{
- istringstream iss(ds_str);
- unsigned int tag, algorithm, digest_type;
- stringbuf digestbuf;
-
- iss >> tag >> algorithm >> digest_type >> &digestbuf;
- if (iss.bad() || iss.fail()) {
- isc_throw(InvalidRdataText, "Invalid DS text");
- }
- if (tag > 0xffff) {
- isc_throw(InvalidRdataText, "DS tag out of range");
- }
- if (algorithm > 0xff) {
- isc_throw(InvalidRdataText, "DS algorithm out of range");
- }
- if (digest_type > 0xff) {
- isc_throw(InvalidRdataText, "DS digest type out of range");
- }
-
- vector<uint8_t> digest;
- decodeHex(digestbuf.str(), digest);
-
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
-
-DS::DS(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len < 4) {
- isc_throw(InvalidRdataLength, "DS too short");
- }
-
- uint16_t tag = buffer.readUint16();
- uint16_t algorithm = buffer.readUint8();
- uint16_t digest_type = buffer.readUint8();
-
- rdata_len -= 4;
- vector<uint8_t> digest(rdata_len);
- buffer.readData(&digest[0], rdata_len);
+ impl_(new DSImpl(ds_str))
+{}
- impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
+DS::DS(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new DSImpl(buffer, rdata_len))
+{}
DS::DS(const DS& source) :
Rdata(), impl_(new DSImpl(*source.impl_))
@@ -117,57 +62,29 @@ DS::~DS() {
string
DS::toText() const {
- using namespace boost;
- return (lexical_cast<string>(static_cast<int>(impl_->tag_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->algorithm_)) +
- " " + lexical_cast<string>(static_cast<int>(impl_->digest_type_)) +
- " " + encodeHex(impl_->digest_));
+ return (impl_->toText());
}
void
DS::toWire(OutputBuffer& buffer) const {
- buffer.writeUint16(impl_->tag_);
- buffer.writeUint8(impl_->algorithm_);
- buffer.writeUint8(impl_->digest_type_);
- buffer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(buffer);
}
void
DS::toWire(AbstractMessageRenderer& renderer) const {
- renderer.writeUint16(impl_->tag_);
- renderer.writeUint8(impl_->algorithm_);
- renderer.writeUint8(impl_->digest_type_);
- renderer.writeData(&impl_->digest_[0], impl_->digest_.size());
+ impl_->toWire(renderer);
}
int
DS::compare(const Rdata& other) const {
const DS& other_ds = dynamic_cast<const DS&>(other);
- if (impl_->tag_ != other_ds.impl_->tag_) {
- return (impl_->tag_ < other_ds.impl_->tag_ ? -1 : 1);
- }
- if (impl_->algorithm_ != other_ds.impl_->algorithm_) {
- return (impl_->algorithm_ < other_ds.impl_->algorithm_ ? -1 : 1);
- }
- if (impl_->digest_type_ != other_ds.impl_->digest_type_) {
- return (impl_->digest_type_ < other_ds.impl_->digest_type_ ? -1 : 1);
- }
-
- size_t this_len = impl_->digest_.size();
- size_t other_len = other_ds.impl_->digest_.size();
- size_t cmplen = min(this_len, other_len);
- int cmp = memcmp(&impl_->digest_[0], &other_ds.impl_->digest_[0], cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_ds.impl_));
}
uint16_t
DS::getTag() const {
- return (impl_->tag_);
+ return (impl_->getTag());
}
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/ds_43.h b/src/lib/dns/rdata/generic/ds_43.h
index 03b19a0..2697f51 100644
--- a/src/lib/dns/rdata/generic/ds_43.h
+++ b/src/lib/dns/rdata/generic/ds_43.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+// BEGIN_HEADER_GUARD
+
#include <stdint.h>
#include <string>
@@ -21,8 +23,6 @@
#include <dns/rrttl.h>
#include <dns/rdata.h>
-// BEGIN_HEADER_GUARD
-
// BEGIN_ISC_NAMESPACE
// BEGIN_COMMON_DECLARATIONS
@@ -30,20 +30,41 @@
// BEGIN_RDATA_NAMESPACE
-struct DSImpl;
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+/// \brief \c rdata::generic::DS class represents the DS RDATA as defined in
+/// RFC3658.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DS RDATA.
class DS : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
DS& operator=(const DS& source);
+
+ /// \brief The destructor.
~DS();
+ /// \brief Return the value of the Tag field.
///
- /// Specialized methods
- ///
+ /// This method never throws an exception.
uint16_t getTag() const;
private:
+ typedef detail::DSLikeImpl<DS, 43> DSImpl;
DSImpl* impl_;
};
diff --git a/src/lib/dns/rdata/generic/hinfo_13.cc b/src/lib/dns/rdata/generic/hinfo_13.cc
new file mode 100644
index 0000000..45f4209
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.cc
@@ -0,0 +1,129 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/character_string.h>
+#include <util/strutil.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+
+HINFO::HINFO(const string& hinfo_str) {
+ string::const_iterator input_iterator = hinfo_str.begin();
+ cpu_ = getNextCharacterString(hinfo_str, input_iterator);
+
+ skipLeftSpaces(hinfo_str, input_iterator);
+
+ os_ = getNextCharacterString(hinfo_str, input_iterator);
+}
+
+HINFO::HINFO(InputBuffer& buffer, size_t rdata_len) {
+ cpu_ = getNextCharacterString(buffer, rdata_len);
+ os_ = getNextCharacterString(buffer, rdata_len);
+}
+
+HINFO::HINFO(const HINFO& source):
+ Rdata(), cpu_(source.cpu_), os_(source.os_)
+{
+}
+
+std::string
+HINFO::toText() const {
+ string result;
+ result += "\"";
+ result += cpu_;
+ result += "\" \"";
+ result += os_;
+ result += "\"";
+ return (result);
+}
+
+void
+HINFO::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+HINFO::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+int
+HINFO::compare(const Rdata& other) const {
+ const HINFO& other_hinfo = dynamic_cast<const HINFO&>(other);
+
+ if (cpu_ < other_hinfo.cpu_) {
+ return (-1);
+ } else if (cpu_ > other_hinfo.cpu_) {
+ return (1);
+ }
+
+ if (os_ < other_hinfo.os_) {
+ return (-1);
+ } else if (os_ > other_hinfo.os_) {
+ return (1);
+ }
+
+ return (0);
+}
+
+const std::string&
+HINFO::getCPU() const {
+ return (cpu_);
+}
+
+const std::string&
+HINFO::getOS() const {
+ return (os_);
+}
+
+void
+HINFO::skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid HINFO text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/hinfo_13.h b/src/lib/dns/rdata/generic/hinfo_13.h
new file mode 100644
index 0000000..8513419
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c HINFO class represents the HINFO rdata defined in
+/// RFC1034, RFC1035
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// HINFO rdata.
+class HINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // HINFO specific methods
+ const std::string& getCPU() const;
+ const std::string& getOS() const;
+
+private:
+ /// Skip the left whitespaces of the input string
+ ///
+ /// \param input_str The input string
+ /// \param input_iterator From which the skipping started
+ void skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator);
+
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer Where to write data in
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint8(cpu_.size());
+ outputer.writeData(cpu_.c_str(), cpu_.size());
+
+ outputer.writeUint8(os_.size());
+ outputer.writeData(os_.c_str(), os_.size());
+ }
+
+ std::string cpu_;
+ std::string os_;
+};
+
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
new file mode 100644
index 0000000..aa5272c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -0,0 +1,156 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c minfo_str must be formatted as follows:
+/// \code <rmailbox name> <emailbox name>
+/// \endcode
+/// where both fields must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "rmail.example.com. email.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string is invalid.
+MINFO::MINFO(const std::string& minfo_str) :
+ // We cannot construct both names in the initialization list due to the
+ // necessary text processing, so we have to initialize them with a dummy
+ // name and replace them later.
+ rmailbox_(Name::ROOT_NAME()), emailbox_(Name::ROOT_NAME())
+{
+ istringstream iss(minfo_str);
+ string rmailbox_str, emailbox_str;
+ iss >> rmailbox_str >> emailbox_str;
+
+ // Validation: A valid MINFO RR must have exactly two fields.
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text: " << minfo_str);
+ }
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Invalid MINFO text (redundant field): "
+ << minfo_str);
+ }
+
+ rmailbox_ = Name(rmailbox_str);
+ emailbox_ = Name(emailbox_str);
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire is invalid.
+MINFO::MINFO(InputBuffer& buffer, size_t) :
+ rmailbox_(buffer), emailbox_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+MINFO::MINFO(const MINFO& other) :
+ Rdata(), rmailbox_(other.rmailbox_), emailbox_(other.emailbox_)
+{}
+
+/// \brief Convert the \c MINFO to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c MINFO(const std::string&))).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c MINFO object.
+std::string
+MINFO::toText() const {
+ return (rmailbox_.toText() + " " + emailbox_.toText());
+}
+
+/// \brief Render the \c MINFO in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+MINFO::toWire(OutputBuffer& buffer) const {
+ rmailbox_.toWire(buffer);
+ emailbox_.toWire(buffer);
+}
+
+MINFO&
+MINFO::operator=(const MINFO& source) {
+ rmailbox_ = source.rmailbox_;
+ emailbox_ = source.emailbox_;
+
+ return (*this);
+}
+
+/// \brief Render the \c MINFO in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE MINFO is "well-known", the rmailbox and
+/// emailbox fields (domain names) will be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+MINFO::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeName(rmailbox_);
+ renderer.writeName(emailbox_);
+}
+
+/// \brief Compare two instances of \c MINFO RDATA.
+///
+/// See documentation in \c Rdata.
+int
+MINFO::compare(const Rdata& other) const {
+ const MINFO& other_minfo = dynamic_cast<const MINFO&>(other);
+
+ const int cmp = compareNames(rmailbox_, other_minfo.rmailbox_);
+ if (cmp != 0) {
+ return (cmp);
+ }
+ return (compareNames(emailbox_, other_minfo.emailbox_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/minfo_14.h b/src/lib/dns/rdata/generic/minfo_14.h
new file mode 100644
index 0000000..f3ee1d0
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.h
@@ -0,0 +1,82 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::generic::MINFO class represents the MINFO RDATA as
+/// defined in RFC1035.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// MINFO RDATA.
+class MINFO : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Define the assignment operator.
+ ///
+ /// \exception std::bad_alloc Memory allocation fails in copying
+ /// internal member variables (this should be very rare).
+ MINFO& operator=(const MINFO& source);
+
+ /// \brief Return the value of the rmailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ ///
+ /// \note
+ /// Unlike the case of some other RDATA classes (such as
+ /// \c NS::getNSName()), this method constructs a new \c Name object
+ /// and returns it, instead of returning a reference to a \c Name object
+ /// internally maintained in the class (which is a private member).
+ /// This is based on the observation that this method will be rarely
+ /// used and even when it's used it will not be in a performance context
+ /// (for example, a recursive resolver won't need this field in its
+ /// resolution process). By returning a new object we have flexibility
+ /// of changing the internal representation without the risk of changing
+ /// the interface or method property.
+ /// The same note applies to the \c getEmailbox() method.
+ Name getRmailbox() const { return (rmailbox_); }
+
+ /// \brief Return the value of the emailbox field.
+ ///
+ /// \exception std::bad_alloc If resource allocation for the returned
+ /// \c Name fails.
+ Name getEmailbox() const { return (emailbox_); }
+
+private:
+ Name rmailbox_;
+ Name emailbox_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/naptr_35.cc b/src/lib/dns/rdata/generic/naptr_35.cc
new file mode 100644
index 0000000..129bf6c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.cc
@@ -0,0 +1,220 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/character_string.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+namespace {
+/// Skip the left whitespaces of the input string
+///
+/// \param input_str The input string
+/// \param input_iterator From which the skipping started
+void
+skipLeftSpaces(const std::string& input_str,
+ std::string::const_iterator& input_iterator)
+{
+ if (input_iterator >= input_str.end()) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, field is missing.");
+ }
+
+ if (!isspace(*input_iterator)) {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, fields are not separated by space.");
+ }
+ // Skip white spaces
+ while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+ ++input_iterator;
+ }
+}
+
+} // Anonymous namespace
+
+NAPTR::NAPTR(InputBuffer& buffer, size_t len):
+ replacement_(".")
+{
+ order_ = buffer.readUint16();
+ preference_ = buffer.readUint16();
+
+ flags_ = getNextCharacterString(buffer, len);
+ services_ = getNextCharacterString(buffer, len);
+ regexp_ = getNextCharacterString(buffer, len);
+ replacement_ = Name(buffer);
+}
+
+NAPTR::NAPTR(const std::string& naptr_str):
+ replacement_(".")
+{
+ istringstream iss(naptr_str);
+ uint16_t order;
+ uint16_t preference;
+
+ iss >> order >> preference;
+
+ if (iss.bad() || iss.fail()) {
+ isc_throw(InvalidRdataText, "Invalid NAPTR text format");
+ }
+
+ order_ = order;
+ preference_ = preference;
+
+ string::const_iterator input_iterator = naptr_str.begin() + iss.tellg();
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ flags_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ services_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ regexp_ = getNextCharacterString(naptr_str, input_iterator);
+
+ skipLeftSpaces(naptr_str, input_iterator);
+
+ if (input_iterator < naptr_str.end()) {
+ string replacementStr(input_iterator, naptr_str.end());
+
+ replacement_ = Name(replacementStr);
+ } else {
+ isc_throw(InvalidRdataText,
+ "Invalid NAPTR text format, replacement field is missing");
+ }
+}
+
+NAPTR::NAPTR(const NAPTR& naptr):
+ Rdata(), order_(naptr.order_), preference_(naptr.preference_),
+ flags_(naptr.flags_), services_(naptr.services_), regexp_(naptr.regexp_),
+ replacement_(naptr.replacement_)
+{
+}
+
+void
+NAPTR::toWire(OutputBuffer& buffer) const {
+ toWireHelper(buffer);
+}
+
+void
+NAPTR::toWire(AbstractMessageRenderer& renderer) const {
+ toWireHelper(renderer);
+}
+
+string
+NAPTR::toText() const {
+ string result;
+ result += lexical_cast<string>(order_);
+ result += " ";
+ result += lexical_cast<string>(preference_);
+ result += " \"";
+ result += flags_;
+ result += "\" \"";
+ result += services_;
+ result += "\" \"";
+ result += regexp_;
+ result += "\" ";
+ result += replacement_.toText();
+ return (result);
+}
+
+int
+NAPTR::compare(const Rdata& other) const {
+ const NAPTR other_naptr = dynamic_cast<const NAPTR&>(other);
+
+ if (order_ < other_naptr.order_) {
+ return (-1);
+ } else if (order_ > other_naptr.order_) {
+ return (1);
+ }
+
+ if (preference_ < other_naptr.preference_) {
+ return (-1);
+ } else if (preference_ > other_naptr.preference_) {
+ return (1);
+ }
+
+ if (flags_ < other_naptr.flags_) {
+ return (-1);
+ } else if (flags_ > other_naptr.flags_) {
+ return (1);
+ }
+
+ if (services_ < other_naptr.services_) {
+ return (-1);
+ } else if (services_ > other_naptr.services_) {
+ return (1);
+ }
+
+ if (regexp_ < other_naptr.regexp_) {
+ return (-1);
+ } else if (regexp_ > other_naptr.regexp_) {
+ return (1);
+ }
+
+ return (compareNames(replacement_, other_naptr.replacement_));
+}
+
+uint16_t
+NAPTR::getOrder() const {
+ return (order_);
+}
+
+uint16_t
+NAPTR::getPreference() const {
+ return (preference_);
+}
+
+const std::string&
+NAPTR::getFlags() const {
+ return (flags_);
+}
+
+const std::string&
+NAPTR::getServices() const {
+ return (services_);
+}
+
+const std::string&
+NAPTR::getRegexp() const {
+ return (regexp_);
+}
+
+const Name&
+NAPTR::getReplacement() const {
+ return (replacement_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/naptr_35.h b/src/lib/dns/rdata/generic/naptr_35.h
new file mode 100644
index 0000000..ca16b3c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c NAPTR class represents the NAPTR rdata defined in
+/// RFC2915, RFC2168 and RFC3403
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// NAPTR rdata.
+class NAPTR : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ // NAPTR specific methods
+ uint16_t getOrder() const;
+ uint16_t getPreference() const;
+ const std::string& getFlags() const;
+ const std::string& getServices() const;
+ const std::string& getRegexp() const;
+ const Name& getReplacement() const;
+private:
+ /// Helper template function for toWire()
+ ///
+ /// \param outputer Where to write data in
+ template <typename T>
+ void toWireHelper(T& outputer) const {
+ outputer.writeUint16(order_);
+ outputer.writeUint16(preference_);
+
+ outputer.writeUint8(flags_.size());
+ outputer.writeData(flags_.c_str(), flags_.size());
+
+ outputer.writeUint8(services_.size());
+ outputer.writeData(services_.c_str(), services_.size());
+
+ outputer.writeUint8(regexp_.size());
+ outputer.writeData(regexp_.c_str(), regexp_.size());
+
+ replacement_.toWire(outputer);
+ }
+
+ uint16_t order_;
+ uint16_t preference_;
+ std::string flags_;
+ std::string services_;
+ std::string regexp_;
+ Name replacement_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/rp_17.cc b/src/lib/dns/rdata/generic/rp_17.cc
index b8b2ba2..781b55d 100644
--- a/src/lib/dns/rdata/generic/rp_17.cc
+++ b/src/lib/dns/rdata/generic/rp_17.cc
@@ -24,6 +24,7 @@
using namespace std;
using namespace isc::dns;
+using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 0c82406..59ff030 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -243,5 +243,10 @@ RRSIG::compare(const Rdata& other) const {
}
}
+const RRType&
+RRSIG::typeCovered() const {
+ return (impl_->covered_);
+}
+
// END_RDATA_NAMESPACE
// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index 19acc40..b32c17f 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -38,6 +38,9 @@ public:
// END_COMMON_MEMBERS
RRSIG& operator=(const RRSIG& source);
~RRSIG();
+
+ // specialized methods
+ const RRType& typeCovered() const;
private:
RRSIGImpl* impl_;
};
diff --git a/src/lib/dns/rdata/generic/spf_99.cc b/src/lib/dns/rdata/generic/spf_99.cc
new file mode 100644
index 0000000..492de98
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.cc
@@ -0,0 +1,87 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <vector>
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+#include <dns/rdata/generic/detail/txt_like.h>
+
+SPF&
+SPF::operator=(const SPF& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ SPFImpl* newimpl = new SPFImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+SPF::~SPF() {
+ delete impl_;
+}
+
+SPF::SPF(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new SPFImpl(buffer, rdata_len))
+{}
+
+SPF::SPF(const std::string& txtstr) :
+ impl_(new SPFImpl(txtstr))
+{}
+
+SPF::SPF(const SPF& other) :
+ Rdata(), impl_(new SPFImpl(*other.impl_))
+{}
+
+void
+SPF::toWire(OutputBuffer& buffer) const {
+ impl_->toWire(buffer);
+}
+
+void
+SPF::toWire(AbstractMessageRenderer& renderer) const {
+ impl_->toWire(renderer);
+}
+
+string
+SPF::toText() const {
+ return (impl_->toText());
+}
+
+int
+SPF::compare(const Rdata& other) const {
+ const SPF& other_txt = dynamic_cast<const SPF&>(other);
+
+ return (impl_->compare(*other_txt.impl_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/spf_99.h b/src/lib/dns/rdata/generic/spf_99.h
new file mode 100644
index 0000000..956adb9
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.h
@@ -0,0 +1,52 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
+class SPF : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ SPF& operator=(const SPF& source);
+ ~SPF();
+
+private:
+ typedef TXTLikeImpl<SPF, 99> SPFImpl;
+ SPFImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/txt_16.cc b/src/lib/dns/rdata/generic/txt_16.cc
index ac2ba8a..418bc05 100644
--- a/src/lib/dns/rdata/generic/txt_16.cc
+++ b/src/lib/dns/rdata/generic/txt_16.cc
@@ -30,130 +30,57 @@ using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
-TXT::TXT(InputBuffer& buffer, size_t rdata_len) {
- if (rdata_len > MAX_RDLENGTH) {
- isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
- }
+#include <dns/rdata/generic/detail/txt_like.h>
- if (rdata_len == 0) { // note that this couldn't happen in the loop.
- isc_throw(DNSMessageFORMERR,
- "Error in parsing TXT RDATA: 0-length character string");
+TXT&
+TXT::operator=(const TXT& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
}
- do {
- const uint8_t len = buffer.readUint8();
- if (rdata_len < len + 1) {
- isc_throw(DNSMessageFORMERR,
- "Error in parsing TXT RDATA: character string length "
- "is too large: " << static_cast<int>(len));
- }
- vector<uint8_t> data(len + 1);
- data[0] = len;
- buffer.readData(&data[0] + 1, len);
- string_list_.push_back(data);
-
- rdata_len -= (len + 1);
- } while (rdata_len > 0);
-}
-
-TXT::TXT(const std::string& txtstr) {
- // TBD: this is a simple, incomplete implementation that only supports
- // a single character-string.
+ TXTImpl* newimpl = new TXTImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
- size_t length = txtstr.size();
- size_t pos_begin = 0;
-
- if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
- pos_begin = 1;
- length -= 2;
- }
+ return (*this);
+}
- if (length > MAX_CHARSTRING_LEN) {
- isc_throw(CharStringTooLong, "TXT RDATA construction from text: "
- "string length is too long: " << length);
- }
+TXT::~TXT() {
+ delete impl_;
+}
- // TBD: right now, we don't support escaped characters
- if (txtstr.find('\\') != string::npos) {
- isc_throw(InvalidRdataText, "TXT RDATA from text: "
- "escaped character is currently not supported: " << txtstr);
- }
+TXT::TXT(InputBuffer& buffer, size_t rdata_len) :
+ impl_(new TXTImpl(buffer, rdata_len))
+{}
- vector<uint8_t> data;
- data.reserve(length + 1);
- data.push_back(length);
- data.insert(data.end(), txtstr.begin() + pos_begin,
- txtstr.begin() + pos_begin + length);
- string_list_.push_back(data);
-}
+TXT::TXT(const std::string& txtstr) :
+ impl_(new TXTImpl(txtstr))
+{}
TXT::TXT(const TXT& other) :
- Rdata(), string_list_(other.string_list_)
+ Rdata(), impl_(new TXTImpl(*other.impl_))
{}
void
TXT::toWire(OutputBuffer& buffer) const {
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- buffer.writeData(&(*it)[0], (*it).size());
- }
+ impl_->toWire(buffer);
}
void
TXT::toWire(AbstractMessageRenderer& renderer) const {
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- renderer.writeData(&(*it)[0], (*it).size());
- }
+ impl_->toWire(renderer);
}
string
TXT::toText() const {
- string s;
-
- // XXX: this implementation is not entirely correct. for example, it
- // should escape double-quotes if they appear in the character string.
- for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
- it != string_list_.end();
- ++it)
- {
- if (!s.empty()) {
- s.push_back(' ');
- }
- s.push_back('"');
- s.insert(s.end(), (*it).begin() + 1, (*it).end());
- s.push_back('"');
- }
-
- return (s);
+ return (impl_->toText());
}
int
TXT::compare(const Rdata& other) const {
const TXT& other_txt = dynamic_cast<const TXT&>(other);
- // This implementation is not efficient. Revisit this (TBD).
- OutputBuffer this_buffer(0);
- toWire(this_buffer);
- size_t this_len = this_buffer.getLength();
-
- OutputBuffer other_buffer(0);
- other_txt.toWire(other_buffer);
- const size_t other_len = other_buffer.getLength();
-
- const size_t cmplen = min(this_len, other_len);
- const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
- cmplen);
- if (cmp != 0) {
- return (cmp);
- } else {
- return ((this_len == other_len) ? 0 :
- (this_len < other_len) ? -1 : 1);
- }
+ return (impl_->compare(*other_txt.impl_));
}
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/txt_16.h b/src/lib/dns/rdata/generic/txt_16.h
index b4c791f..d99d69b 100644
--- a/src/lib/dns/rdata/generic/txt_16.h
+++ b/src/lib/dns/rdata/generic/txt_16.h
@@ -28,14 +28,19 @@
// BEGIN_RDATA_NAMESPACE
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
class TXT : public Rdata {
public:
// BEGIN_COMMON_MEMBERS
// END_COMMON_MEMBERS
+
+ TXT& operator=(const TXT& source);
+ ~TXT();
+
private:
- /// Note: this is a prototype version; we may reconsider
- /// this representation later.
- std::vector<std::vector<uint8_t> > string_list_;
+ typedef TXTLikeImpl<TXT, 16> TXTImpl;
+ TXTImpl* impl_;
};
// END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
new file mode 100644
index 0000000..0a9a23c
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -0,0 +1,145 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \param dhcid_str A hexadecimal representation of the DHCID binary data.
+/// The data is considered to be opaque, but a sanity check is performed.
+///
+/// <b>Exceptions</b>
+///
+/// \c dhcid_str must be a valid hexadecimal string (NOTE(review): RFC4701
+/// presentation format is base64, but this code uses decodeHex -- confirm);
+/// the binary data should consist of at least 3 octets as per RFC4701:
+/// < 2 octets > Identifier type code
+/// < 1 octet > Digest type code
+/// < n octets > Digest (length depends on digest type)
+/// If the data is less than 3 octets (i.e. it cannot contain id type code and
+/// digest type code), an exception of class \c InvalidRdataLength is thrown.
+DHCID::DHCID(const string& dhcid_str) {
+ istringstream iss(dhcid_str);
+ stringbuf digestbuf;
+
+ iss >> &digestbuf;
+ isc::util::encode::decodeHex(digestbuf.str(), digest_);
+
+ // RFC4701 states DNS software should consider the RDATA section to
+ // be opaque, but there must be at least three bytes in the data:
+ // < 2 octets > Identifier type code
+ // < 1 octet > Digest type code
+ if (digest_.size() < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << digest_.size() <<
+ " too short, need at least 3 bytes");
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes
+///
+/// <b>Exceptions</b>
+/// \c InvalidRdataLength is thrown if \c rdata_len is less than the minimum of 3 octets
+DHCID::DHCID(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 3) {
+ isc_throw(InvalidRdataLength, "DHCID length " << rdata_len <<
+ " too short, need at least 3 bytes");
+ }
+
+ digest_.resize(rdata_len);
+ buffer.readData(&digest_[0], rdata_len);
+}
+
+/// \brief The copy constructor.
+///
+/// This trivial copy constructor never throws an exception.
+DHCID::DHCID(const DHCID& other) : Rdata(), digest_(other.digest_)
+{}
+
+/// \brief Render the \c DHCID in the wire format.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+DHCID::toWire(OutputBuffer& buffer) const {
+ buffer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Render the \c DHCID in the wire format into a
+/// \c MessageRenderer object.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer in which the \c DHCID is to be stored.
+void
+DHCID::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Convert the \c DHCID to a string.
+///
+/// This method returns a \c std::string object representing the \c DHCID.
+///
+/// \return A string representation of \c DHCID.
+string
+DHCID::toText() const {
+ return (isc::util::encode::encodeHex(digest_));
+}
+
+/// \brief Compare two instances of \c DHCID RDATA.
+///
+/// See documentation in \c Rdata.
+int
+DHCID::compare(const Rdata& other) const {
+ const DHCID& other_dhcid = dynamic_cast<const DHCID&>(other);
+
+ size_t this_len = digest_.size();
+ size_t other_len = other_dhcid.digest_.size();
+ size_t cmplen = min(this_len, other_len);
+ int cmp = memcmp(&digest_[0], &other_dhcid.digest_[0], cmplen);
+ if (cmp != 0) {
+ return (cmp);
+ } else {
+ return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
+ }
+}
+
+/// \brief Accessor method to get the DHCID digest
+///
+/// \return A reference to the binary DHCID data
+const std::vector<uint8_t>&
+DHCID::getDigest() const {
+ return (digest_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
new file mode 100644
index 0000000..919395f
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -0,0 +1,58 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::DHCID class represents the DHCID RDATA as defined %in
+/// RFC4701.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DHCID RDATA.
+class DHCID : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Return the digest.
+ ///
+ /// This method never throws an exception.
+ const std::vector<uint8_t>& getDigest() const;
+
+private:
+ /// \brief Private data representation
+ ///
+ /// Opaque data at least 3 octets long as per RFC4701.
+ ///
+ std::vector<uint8_t> digest_;
+};
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
new file mode 100644
index 0000000..93b5d4d
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -0,0 +1,245 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <sstream>
+
+#include <boost/lexical_cast.hpp>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl {
+ // straightforward representation of SRV RDATA fields
+ SRVImpl(uint16_t priority, uint16_t weight, uint16_t port,
+ const Name& target) :
+ priority_(priority), weight_(weight), port_(port),
+ target_(target)
+ {}
+
+ uint16_t priority_;
+ uint16_t weight_;
+ uint16_t port_;
+ Name target_;
+};
+
+/// \brief Constructor from string.
+///
+/// \c srv_str must be formatted as follows:
+/// \code <Priority> <Weight> <Port> <Target>
+/// \endcode
+/// where
+/// - <Priority>, <Weight>, and <Port> are an unsigned 16-bit decimal
+/// integer.
+/// - <Target> is a valid textual representation of domain name.
+///
+/// An example of valid string is:
+/// \code "1 5 1500 example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// If <Target> is not a valid domain name, a corresponding exception from
+/// the \c Name class will be thrown;
+/// if %any of the other bullet points above is not met, an exception of
+/// class \c InvalidRdataText will be thrown.
+/// This constructor internally involves resource allocation, and if it fails
+/// a corresponding standard exception will be thrown.
+SRV::SRV(const string& srv_str) :
+ impl_(NULL)
+{
+ istringstream iss(srv_str);
+
+ try {
+ const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
+ const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
+ const Name targetname(getToken(iss));
+
+ if (!iss.eof()) {
+ isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
+ srv_str);
+ }
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+ } catch (const StringTokenError& ste) {
+ isc_throw(InvalidRdataText, "Invalid SRV text: " <<
+ ste.what() << ": " << srv_str);
+ }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// When a read operation on \c buffer fails (e.g., due to a corrupted
+/// message) a corresponding exception from the \c InputBuffer class will
+/// be thrown.
+/// If the wire-format data does not end with a valid domain name,
+/// a corresponding exception from the \c Name class will be thrown.
+/// In addition, this constructor internally involves resource allocation,
+/// and if it fails a corresponding standard exception will be thrown.
+///
+/// According to RFC2782, the Target field must be a non compressed form
+/// of domain name. But this implementation accepts a %SRV RR even if that
+/// field is compressed as suggested in RFC3597.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes, normally expected
+/// to be the value of the RDLENGTH field of the corresponding RR.
+SRV::SRV(InputBuffer& buffer, size_t rdata_len) {
+ if (rdata_len < 6) {
+ isc_throw(InvalidRdataLength, "SRV too short");
+ }
+
+ uint16_t priority = buffer.readUint16();
+ uint16_t weight = buffer.readUint16();
+ uint16_t port = buffer.readUint16();
+ const Name targetname(buffer);
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+}
+
+/// \brief The copy constructor.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This constructor never throws an exception otherwise.
+SRV::SRV(const SRV& source) :
+ Rdata(), impl_(new SRVImpl(*source.impl_))
+{}
+
+SRV&
+SRV::operator=(const SRV& source) {
+ if (impl_ == source.impl_) {
+ return (*this);
+ }
+
+ SRVImpl* newimpl = new SRVImpl(*source.impl_);
+ delete impl_;
+ impl_ = newimpl;
+
+ return (*this);
+}
+
+SRV::~SRV() {
+ delete impl_;
+}
+
+/// \brief Convert the \c SRV to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c SRV(const std::string&))).
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+///
+/// \return A \c string object that represents the \c SRV object.
+string
+SRV::toText() const {
+ using namespace boost;
+ return (lexical_cast<string>(impl_->priority_) +
+ " " + lexical_cast<string>(impl_->weight_) +
+ " " + lexical_cast<string>(impl_->port_) +
+ " " + impl_->target_.toText());
+}
+
+/// \brief Render the \c SRV in the wire format without name compression.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+SRV::toWire(OutputBuffer& buffer) const {
+ buffer.writeUint16(impl_->priority_);
+ buffer.writeUint16(impl_->weight_);
+ buffer.writeUint16(impl_->port_);
+ impl_->target_.toWire(buffer);
+}
+
+/// \brief Render the \c SRV in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC2782, the Target field (a domain name) will not be
+/// compressed. However, the domain name could be a target of compression
+/// of other compressible names (though pretty unlikely), the offset
+/// information of the target name may be recorded in \c renderer.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+SRV::toWire(AbstractMessageRenderer& renderer) const {
+ renderer.writeUint16(impl_->priority_);
+ renderer.writeUint16(impl_->weight_);
+ renderer.writeUint16(impl_->port_);
+ renderer.writeName(impl_->target_, false);
+}
+
+/// \brief Compare two instances of \c SRV RDATA.
+///
+/// See documentation in \c Rdata.
+int
+SRV::compare(const Rdata& other) const {
+ const SRV& other_srv = dynamic_cast<const SRV&>(other);
+
+ if (impl_->priority_ != other_srv.impl_->priority_) {
+ return (impl_->priority_ < other_srv.impl_->priority_ ? -1 : 1);
+ }
+ if (impl_->weight_ != other_srv.impl_->weight_) {
+ return (impl_->weight_ < other_srv.impl_->weight_ ? -1 : 1);
+ }
+ if (impl_->port_ != other_srv.impl_->port_) {
+ return (impl_->port_ < other_srv.impl_->port_ ? -1 : 1);
+ }
+
+ return (compareNames(impl_->target_, other_srv.impl_->target_));
+}
+
+uint16_t
+SRV::getPriority() const {
+ return (impl_->priority_);
+}
+
+uint16_t
+SRV::getWeight() const {
+ return (impl_->weight_);
+}
+
+uint16_t
+SRV::getPort() const {
+ return (impl_->port_);
+}
+
+const Name&
+SRV::getTarget() const {
+ return (impl_->target_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/srv_33.h b/src/lib/dns/rdata/in_1/srv_33.h
new file mode 100644
index 0000000..32b7dc0
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.h
@@ -0,0 +1,93 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl;
+
+/// \brief \c rdata::SRV class represents the SRV RDATA as defined %in
+/// RFC2782.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// SRV RDATA.
+class SRV : public Rdata {
+public:
+ // BEGIN_COMMON_MEMBERS
+ // END_COMMON_MEMBERS
+
+ /// \brief Assignment operator.
+ ///
+ /// It internally allocates a resource, and if it fails a corresponding
+ /// standard exception will be thrown.
+ /// This operator never throws an exception otherwise.
+ ///
+ /// This operator provides the strong exception guarantee: When an
+ /// exception is thrown the content of the assignment target will be
+ /// intact.
+ SRV& operator=(const SRV& source);
+
+ /// \brief The destructor.
+ ~SRV();
+
+ ///
+ /// Specialized methods
+ ///
+
+ /// \brief Return the value of the priority field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getPriority() const;
+
+ /// \brief Return the value of the weight field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getWeight() const;
+
+ /// \brief Return the value of the port field.
+ ///
+ /// This method never throws an exception.
+ uint16_t getPort() const;
+
+ /// \brief Return the value of the target field.
+ ///
+ /// \return A reference to a \c Name class object corresponding to the
+ /// internal target name.
+ ///
+ /// This method never throws an exception.
+ const Name& getTarget() const;
+
+private:
+ SRVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index d9f08ee..e85f82c 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -18,6 +18,7 @@
#include <dns/messagerenderer.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rrtype.h>
using namespace std;
using namespace isc::util;
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 1cb028c..dad1b2b 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -22,6 +22,11 @@
#include <exceptions/exceptions.h>
+// Solaris x86 defines DS in <sys/regset.h>, which gets pulled in by Boost
+#if defined(__sun) && defined(DS)
+# undef DS
+#endif
+
namespace isc {
namespace util {
class InputBuffer;
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index 3a249c1..3794678 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -32,16 +32,21 @@ run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
run_unittests_SOURCES += rdata_opt_unittest.cc
run_unittests_SOURCES += rdata_dnskey_unittest.cc
-run_unittests_SOURCES += rdata_ds_unittest.cc
+run_unittests_SOURCES += rdata_ds_like_unittest.cc
run_unittests_SOURCES += rdata_nsec_unittest.cc
run_unittests_SOURCES += rdata_nsec3_unittest.cc
run_unittests_SOURCES += rdata_nsecbitmap_unittest.cc
run_unittests_SOURCES += rdata_nsec3param_unittest.cc
run_unittests_SOURCES += rdata_rrsig_unittest.cc
run_unittests_SOURCES += rdata_rp_unittest.cc
+run_unittests_SOURCES += rdata_srv_unittest.cc
+run_unittests_SOURCES += rdata_minfo_unittest.cc
run_unittests_SOURCES += rdata_tsig_unittest.cc
+run_unittests_SOURCES += rdata_naptr_unittest.cc
+run_unittests_SOURCES += rdata_hinfo_unittest.cc
run_unittests_SOURCES += rrset_unittest.cc rrsetlist_unittest.cc
run_unittests_SOURCES += question_unittest.cc
run_unittests_SOURCES += rrparamregistry_unittest.cc
@@ -51,6 +56,7 @@ run_unittests_SOURCES += tsig_unittest.cc
run_unittests_SOURCES += tsigerror_unittest.cc
run_unittests_SOURCES += tsigkey_unittest.cc
run_unittests_SOURCES += tsigrecord_unittest.cc
+run_unittests_SOURCES += character_string_unittest.cc
run_unittests_SOURCES += run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
# We shouldn't need to include BOTAN_LDFLAGS here, but there
diff --git a/src/lib/dns/tests/character_string_unittest.cc b/src/lib/dns/tests/character_string_unittest.cc
new file mode 100644
index 0000000..5fed9eb
--- /dev/null
+++ b/src/lib/dns/tests/character_string_unittest.cc
@@ -0,0 +1,92 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+
+#include <dns/rdata.h>
+#include <dns/tests/unittest_util.h>
+#include <dns/character_string.h>
+
+using isc::UnitTestUtil;
+
+using namespace std;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+using namespace isc::dns::rdata;
+
+namespace {
+
+class CharacterString {
+public:
+ CharacterString(const string& str){
+ string::const_iterator it = str.begin();
+ characterStr_ = getNextCharacterString(str, it);
+ }
+ const string& str() const { return characterStr_; }
+private:
+ string characterStr_;
+};
+
+TEST(CharacterStringTest, testNormalCase) {
+ CharacterString cstr1("foo");
+ EXPECT_EQ(string("foo"), cstr1.str());
+
+ // Test <character-string> that separated by space
+ CharacterString cstr2("foo bar");
+ EXPECT_EQ(string("foo"), cstr2.str());
+
+ // Test <character-string> that separated by quotes
+ CharacterString cstr3("\"foo bar\"");
+ EXPECT_EQ(string("foo bar"), cstr3.str());
+
+ // Test <character-string> that not separate by quotes but ended with quotes
+ CharacterString cstr4("foo\"");
+ EXPECT_EQ(string("foo\""), cstr4.str());
+}
+
+TEST(CharacterStringTest, testBadCase) {
+ // The <character-string> that started with quotes should also be ended
+ // with quotes
+ EXPECT_THROW(CharacterString cstr("\"foo"), InvalidRdataText);
+
+ // The string length cannot exceed 255 characters
+ string str;
+ for (int i = 0; i < 257; ++i) {
+ str += 'A';
+ }
+ EXPECT_THROW(CharacterString cstr(str), CharStringTooLong);
+}
+
+TEST(CharacterStringTest, testEscapeCharacter) {
+ CharacterString cstr1("foo\\bar");
+ EXPECT_EQ(string("foobar"), cstr1.str());
+
+ CharacterString cstr2("foo\\\\bar");
+ EXPECT_EQ(string("foo\\bar"), cstr2.str());
+
+ CharacterString cstr3("fo\\111bar");
+ EXPECT_EQ(string("foobar"), cstr3.str());
+
+ CharacterString cstr4("fo\\1112bar");
+ EXPECT_EQ(string("foo2bar"), cstr4.str());
+
+ // There must be at least 3 digits followed by '\'
+ EXPECT_THROW(CharacterString cstr("foo\\98ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\9ar"), InvalidRdataText);
+ EXPECT_THROW(CharacterString cstr("foo\\98"), InvalidRdataText);
+}
+
+} // namespace
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index c79ea2c..f068791 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -62,7 +62,6 @@ using namespace isc::dns::rdata;
//
const uint16_t Message::DEFAULT_MAX_UDPSIZE;
-const Name test_name("test.example.com");
namespace isc {
namespace util {
@@ -79,7 +78,8 @@ const uint16_t TSIGContext::DEFAULT_FUDGE;
namespace {
class MessageTest : public ::testing::Test {
protected:
- MessageTest() : obuffer(0), renderer(obuffer),
+ MessageTest() : test_name("test.example.com"), obuffer(0),
+ renderer(obuffer),
message_parse(Message::PARSE),
message_render(Message::RENDER),
bogus_section(static_cast<Message::Section>(
@@ -103,8 +103,9 @@ protected:
"FAKEFAKEFAKEFAKE"));
rrset_aaaa->addRRsig(rrset_rrsig);
}
-
+
static Question factoryFromFile(const char* datafile);
+ const Name test_name;
OutputBuffer obuffer;
MessageRenderer renderer;
Message message_parse;
@@ -114,18 +115,23 @@ protected:
RRsetPtr rrset_aaaa; // AAAA RRset with one RDATA with RRSIG
RRsetPtr rrset_rrsig; // RRSIG for the AAAA RRset
TSIGContext tsig_ctx;
+ vector<unsigned char> received_data;
vector<unsigned char> expected_data;
- static void factoryFromFile(Message& message, const char* datafile);
+ void factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options =
+ Message::PARSE_DEFAULT);
};
void
-MessageTest::factoryFromFile(Message& message, const char* datafile) {
- std::vector<unsigned char> data;
- UnitTestUtil::readWireData(datafile, data);
+MessageTest::factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options)
+{
+ received_data.clear();
+ UnitTestUtil::readWireData(datafile, received_data);
- InputBuffer buffer(&data[0], data.size());
- message.fromWire(buffer);
+ InputBuffer buffer(&received_data[0], received_data.size());
+ message.fromWire(buffer, options);
}
TEST_F(MessageTest, headerFlag) {
@@ -173,7 +179,6 @@ TEST_F(MessageTest, headerFlag) {
EXPECT_THROW(message_parse.setHeaderFlag(Message::HEADERFLAG_QR),
InvalidMessageOperation);
}
-
TEST_F(MessageTest, getEDNS) {
EXPECT_FALSE(message_parse.getEDNS()); // by default EDNS isn't set
@@ -530,7 +535,46 @@ TEST_F(MessageTest, appendSection) {
}
+TEST_F(MessageTest, parseHeader) {
+ received_data.clear();
+ UnitTestUtil::readWireData("message_fromWire1", received_data);
+
+ // parseHeader() isn't allowed in the render mode.
+ InputBuffer buffer(&received_data[0], received_data.size());
+ EXPECT_THROW(message_render.parseHeader(buffer), InvalidMessageOperation);
+
+ message_parse.parseHeader(buffer);
+ EXPECT_EQ(0x1035, message_parse.getQid());
+ EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
+ EXPECT_EQ(Rcode::NOERROR(), message_parse.getRcode());
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_QR));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_AA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_RD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_RA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_AD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_CD));
+ EXPECT_EQ(1, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_EQ(2, message_parse.getRRCount(Message::SECTION_ANSWER));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_AUTHORITY));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_ADDITIONAL));
+
+ // Only the header part should have been examined.
+ EXPECT_EQ(12, buffer.getPosition()); // 12 = size of the header section
+ EXPECT_TRUE(message_parse.beginQuestion() == message_parse.endQuestion());
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ANSWER) ==
+ message_parse.endSection(Message::SECTION_ANSWER));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_AUTHORITY) ==
+ message_parse.endSection(Message::SECTION_AUTHORITY));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ADDITIONAL) ==
+ message_parse.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(MessageTest, fromWire) {
+ // fromWire() isn't allowed in the render mode.
+ EXPECT_THROW(factoryFromFile(message_render, "message_fromWire1"),
+ InvalidMessageOperation);
+
factoryFromFile(message_parse, "message_fromWire1");
EXPECT_EQ(0x1035, message_parse.getQid());
EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
@@ -562,6 +606,87 @@ TEST_F(MessageTest, fromWire) {
EXPECT_TRUE(it->isLast());
}
+TEST_F(MessageTest, fromWireShortBuffer) {
+ // We trim a valid message (ending with an SOA RR) for one byte.
+ // fromWire() should throw an exception while parsing the trimmed RR.
+ UnitTestUtil::readWireData("message_fromWire22.wire", received_data);
+ InputBuffer buffer(&received_data[0], received_data.size() - 1);
+ EXPECT_THROW(message_parse.fromWire(buffer), InvalidBufferPosition);
+}
+
+TEST_F(MessageTest, fromWireCombineRRs) {
+ // This message contains 3 RRs in the answer section in the order of
+ // A, AAAA, A types. fromWire() should combine the two A RRs into a
+ // single RRset by default.
+ factoryFromFile(message_parse, "message_fromWire19.wire");
+
+ RRsetIterator it = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetIterator it_end = message_parse.endSection(Message::SECTION_ANSWER);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(2, (*it)->getRdataCount());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+}
+
+// A helper function for a test pattern commonly used in several tests below.
+void
+preserveRRCheck(const Message& message, Message::Section section) {
+ RRsetIterator it = message.beginSection(section);
+ RRsetIterator it_end = message.endSection(section);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("2001:db8::1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.2", (*it)->getRdataIterator()->getCurrent().toText());
+}
+
+TEST_F(MessageTest, fromWirePreserveAnswer) {
+ // Using the same data as the previous test, but specify the PRESERVE_ORDER
+ // option. The received order of RRs should be preserved, and each RR
+ // should be stored in a single RRset.
+ factoryFromFile(message_parse, "message_fromWire19.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve answer RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ANSWER);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAuthority) {
+ // Same for the previous test, but for the authority section.
+ factoryFromFile(message_parse, "message_fromWire20.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve authority RRs");
+ preserveRRCheck(message_parse, Message::SECTION_AUTHORITY);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAdditional) {
+ // Same for the previous test, but for the additional section.
+ factoryFromFile(message_parse, "message_fromWire21.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve additional RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ADDITIONAL);
+ }
+}
+
TEST_F(MessageTest, EDNS0ExtRcode) {
// Extended Rcode = BADVERS
factoryFromFile(message_parse, "message_fromWire10.wire");
@@ -618,15 +743,43 @@ testGetTime() {
return (NOW);
}
+// bit-wise constant flags to configure DNS header flags for test
+// messages.
+const unsigned int QR_FLAG = 0x1;
+const unsigned int AA_FLAG = 0x2;
+const unsigned int RD_FLAG = 0x4;
+
void
commonTSIGToWireCheck(Message& message, MessageRenderer& renderer,
- TSIGContext& tsig_ctx, const char* const expected_file)
+ TSIGContext& tsig_ctx, const char* const expected_file,
+ unsigned int message_flags = RD_FLAG,
+ RRType qtype = RRType::A(),
+ const vector<const char*>* answer_data = NULL)
{
message.setOpcode(Opcode::QUERY());
message.setRcode(Rcode::NOERROR());
- message.setHeaderFlag(Message::HEADERFLAG_RD, true);
+ if ((message_flags & QR_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_QR);
+ }
+ if ((message_flags & AA_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_AA);
+ }
+ if ((message_flags & RD_FLAG) != 0) {
+ message.setHeaderFlag(Message::HEADERFLAG_RD);
+ }
message.addQuestion(Question(Name("www.example.com"), RRClass::IN(),
- RRType::A()));
+ qtype));
+
+ if (answer_data != NULL) {
+ RRsetPtr ans_rrset(new RRset(Name("www.example.com"), RRClass::IN(),
+ qtype, RRTTL(86400)));
+ for (vector<const char*>::const_iterator it = answer_data->begin();
+ it != answer_data->end();
+ ++it) {
+ ans_rrset->addRdata(createRdata(qtype, RRClass::IN(), *it));
+ }
+ message.addRRset(Message::SECTION_ANSWER, ans_rrset);
+ }
message.toWire(renderer, tsig_ctx);
vector<unsigned char> expected_data;
@@ -670,6 +823,182 @@ TEST_F(MessageTest, toWireWithEDNSAndTSIG) {
}
}
+// Some of the following tests involve truncation. We use the query name
+// "www.example.com" and some TXT question/answers. The length of the
+// header and question will be 33 bytes. If we also try to include a
+// TSIG of the same key name (not compressed) with HMAC-MD5, the TSIG RR
+// will be 85 bytes.
+
+// A long TXT RDATA. With a fully compressed owner name, the corresponding
+// RR will be 268 bytes.
+const char* const long_txt1 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";
+
+// With a fully compressed owner name, the corresponding RR will be 212 bytes.
+// It should result in truncation even without TSIG (33 + 268 + 212 = 513)
+const char* const long_txt2 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456";
+
+// With a fully compressed owner name, the corresponding RR will be 127 bytes.
+// So, it can fit in the standard 512 bytes with txt1 and without TSIG, but
+// adding a TSIG would result in truncation (33 + 268 + 127 + 85 = 513)
+const char* const long_txt3 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01";
+
+// This is 1 byte shorter than txt3, which will result in a possible longest
+// message containing answer RRs and TSIG.
+const char* const long_txt4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0";
+
+// Example output generated by
+// "dig -y www.example.com:SFuWd/q99SzF8Yzd1QbB9g== www.example.com txt
+// QID: 0x22c2
+// Time Signed: 0x00004e179212
+TEST_F(MessageTest, toWireTSIGTruncation) {
+ isc::util::detail::gettimeFunction = testGetTime<0x4e179212>;
+
+ // Verify a validly signed query so that we can use the TSIG context
+
+ factoryFromFile(message_parse, "message_fromWire17.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0x22c2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt2);
+ {
+ SCOPED_TRACE("Message sign with TSIG and TC bit on");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire4.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+TEST_F(MessageTest, toWireTSIGTruncation2) {
+ // Similar to the previous test, but without TSIG it wouldn't cause
+ // truncation.
+ isc::util::detail::gettimeFunction = testGetTime<0x4e179212>;
+ factoryFromFile(message_parse, "message_fromWire17.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0x22c2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt3);
+ {
+ SCOPED_TRACE("Message sign with TSIG and TC bit on (2)");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire4.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+TEST_F(MessageTest, toWireTSIGTruncation3) {
+ // Similar to previous ones, but truncation occurs due to too many
+ // Questions (very unusual, but not necessarily illegal).
+
+ // We are going to create a message starting with a standard
+ // header (12 bytes) and multiple questions in the Question
+ // section of the same owner name (changing the RRType, just so
+ // that it would be the form that would be accepted by the BIND 9
+ // parser). The first Question is 21 bytes in length, and the subsequent
+ // ones are 6 bytes. We'll also use a TSIG whose size is 85 bytes.
+ // Up to 66 questions can fit in the standard 512-byte buffer
+ // (12 + 21 + 6 * 65 + 85 = 508). If we try to add one more it would
+ // result in truncation.
+ message_render.setOpcode(Opcode::QUERY());
+ message_render.setRcode(Rcode::NOERROR());
+ for (int i = 1; i <= 67; ++i) {
+ message_render.addQuestion(Question(Name("www.example.com"),
+ RRClass::IN(), RRType(i)));
+ }
+ message_render.toWire(renderer, tsig_ctx);
+
+ // Check the rendered data by parsing it. We only check it has the
+ // TC bit on, has the correct number of questions, and has a TSIG RR.
+ // Checking the signature wouldn't be necessary for this rare case
+ // scenario.
+ InputBuffer buffer(renderer.getData(), renderer.getLength());
+ message_parse.fromWire(buffer);
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ // Note that the number of questions are 66, not 67 as we tried to add.
+ EXPECT_EQ(66, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_TRUE(message_parse.getTSIGRecord() != NULL);
+}
+
+TEST_F(MessageTest, toWireTSIGNoTruncation) {
+ // A boundary case that shouldn't cause truncation: the resulting
+ // response message with a TSIG will be 512 bytes long.
+ isc::util::detail::gettimeFunction = testGetTime<0x4e17b38d>;
+ factoryFromFile(message_parse, "message_fromWire18.wire");
+ EXPECT_EQ(TSIGError::NOERROR(),
+ tsig_ctx.verify(message_parse.getTSIGRecord(),
+ &received_data[0], received_data.size()));
+
+ message_render.setQid(0xd6e2);
+ vector<const char*> answer_data;
+ answer_data.push_back(long_txt1);
+ answer_data.push_back(long_txt4);
+ {
+ SCOPED_TRACE("Message sign with TSIG, no truncation");
+ commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire5.wire",
+ QR_FLAG|AA_FLAG|RD_FLAG,
+ RRType::TXT(), &answer_data);
+ }
+}
+
+// This is a buggy renderer for testing. It behaves like the straightforward
+// MessageRenderer, but once it has some data, its setLengthLimit() ignores
+// the given parameter and resets the limit to the current length, making
+// subsequent insertion result in truncation, which would make TSIG RR
+// rendering fail unexpectedly in the test that follows.
+class BadRenderer : public MessageRenderer {
+public:
+ BadRenderer(isc::util::OutputBuffer& buffer) :
+ MessageRenderer(buffer)
+ {}
+ virtual void setLengthLimit(size_t len) {
+ if (getLength() > 0) {
+ MessageRenderer::setLengthLimit(getLength());
+ } else {
+ MessageRenderer::setLengthLimit(len);
+ }
+ }
+};
+
+TEST_F(MessageTest, toWireTSIGLengthErrors) {
+ // specify an unusual short limit that wouldn't be able to hold
+ // the TSIG.
+ renderer.setLengthLimit(tsig_ctx.getTSIGLength() - 1);
+ // Use commonTSIGToWireCheck() only to call toWire() with otherwise valid
+ // conditions. The checks inside it don't matter because we expect an
+ // exception before any of the checks.
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ InvalidParameter);
+
+ // This one is large enough for TSIG, but the remaining limit isn't
+ // even enough for the Header section.
+ renderer.clear();
+ message_render.clear(Message::RENDER);
+ renderer.setLengthLimit(tsig_ctx.getTSIGLength() + 1);
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ InvalidParameter);
+
+ // Trying to render a message with TSIG using a buggy renderer.
+ obuffer.clear();
+ BadRenderer bad_renderer(obuffer);
+ bad_renderer.setLengthLimit(512);
+ message_render.clear(Message::RENDER);
+ EXPECT_THROW(commonTSIGToWireCheck(message_render, bad_renderer, tsig_ctx,
+ "message_toWire2.wire"),
+ Unexpected);
+}
+
TEST_F(MessageTest, toWireWithoutOpcode) {
message_render.setRcode(Rcode::NOERROR());
EXPECT_THROW(message_render.toWire(renderer), InvalidMessageOperation);
diff --git a/src/lib/dns/tests/question_unittest.cc b/src/lib/dns/tests/question_unittest.cc
index 25fd75b..1d483f2 100644
--- a/src/lib/dns/tests/question_unittest.cc
+++ b/src/lib/dns/tests/question_unittest.cc
@@ -106,6 +106,22 @@ TEST_F(QuestionTest, toWireRenderer) {
obuffer.getLength(), &wiredata[0], wiredata.size());
}
+TEST_F(QuestionTest, toWireTruncated) {
+ // If the available length in the renderer is too small, it would require
+ // truncation. This won't happen in normal cases, but protocol wise it
+ // could still happen if and when we support some (possibly future) opcode
+ // that allows multiple questions.
+
+ // Set the length limit to the qname length so that the whole question
+ // would request truncated
+ renderer.setLengthLimit(example_name1.getLength());
+
+ EXPECT_FALSE(renderer.isTruncated()); // check pre-render condition
+ EXPECT_EQ(0, test_question1.toWire(renderer));
+ EXPECT_TRUE(renderer.isTruncated());
+ EXPECT_EQ(0, renderer.getLength()); // renderer shouldn't have any data
+}
+
// test operator<<. We simply confirm it appends the result of toText().
TEST_F(QuestionTest, LeftShiftOperator) {
ostringstream oss;
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
+const char* const too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+ Rdata_AFSDB_Test() :
+ rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+ {}
+
+ const generic::AFSDB rdata_afsdb;
+ const generic::AFSDB rdata_afsdb2;
+ vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+ EXPECT_EQ(1, rdata_afsdb.getSubtype());
+ EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+ EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+ EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+ // subtype is too large
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+ string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+ generic::AFSDB copy((string(afsdb_text2)));
+ copy = rdata_afsdb;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+ generic::AFSDB copy3((string(afsdb_text2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire1.wire")));
+ // compressed name
+ EXPECT_EQ(0, rdata_afsdb.compare(
+ *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire2.wire", 13)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus server name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+ "rdata_afsdb_fromWire5.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+ // construct actual data
+ rdata_afsdb.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear buffer for the next test
+ obuffer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(obuffer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+ // similar to toWireBuffer, but names in RDATA could be compressed due to
+ // preceding names. Actually they must not be compressed according to
+ // RFC3597, and this test checks that.
+
+ // construct actual data
+ rdata_afsdb.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+
+ // clear renderer for the next test
+ renderer.clear();
+
+ // construct actual data
+ Name("example.com.").toWire(obuffer);
+ rdata_afsdb2.toWire(renderer);
+
+ // construct expected data
+ UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+ // then compare them
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ renderer.getData(), renderer.getLength(),
+ &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+ EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+ EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+ // name must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+ "AFSDB.example.com.")));
+
+ const generic::AFSDB small1("10 afsdb.example.com");
+ const generic::AFSDB large1("65535 afsdb.example.com");
+ const generic::AFSDB large2("256 afsdb.example.com");
+
+ // confirm these are compared as unsigned values
+ EXPECT_GT(0, rdata_afsdb.compare(large1));
+ EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+ // confirm these are compared in network byte order
+ EXPECT_GT(0, small1.compare(large2));
+ EXPECT_LT(0, large2.compare(small1));
+
+ // another AFSDB whose server name is larger than that of rdata_afsdb.
+ const generic::AFSDB large3("256 zzzzz.example.com");
+ EXPECT_GT(0, large2.compare(large3));
+ EXPECT_LT(0, large3.compare(large2));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_ds_like_unittest.cc b/src/lib/dns/tests/rdata_ds_like_unittest.cc
new file mode 100644
index 0000000..9b29446
--- /dev/null
+++ b/src/lib/dns/tests/rdata_ds_like_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string>
+
+#include <util/buffer.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+// hacks to make templates work
+template <class T>
+class RRTYPE : public RRType {
+public:
+ RRTYPE();
+};
+
+template<> RRTYPE<generic::DS>::RRTYPE() : RRType(RRType::DS()) {}
+template<> RRTYPE<generic::DLV>::RRTYPE() : RRType(RRType::DLV()) {}
+
+template <class DS_LIKE>
+class Rdata_DS_LIKE_Test : public RdataTest {
+protected:
+ static DS_LIKE const rdata_ds_like;
+};
+
+string ds_like_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+
+template <class DS_LIKE>
+DS_LIKE const Rdata_DS_LIKE_Test<DS_LIKE>::rdata_ds_like(ds_like_txt);
+
+// The list of types we want to test.
+typedef testing::Types<generic::DS, generic::DLV> Implementations;
+
+TYPED_TEST_CASE(Rdata_DS_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toText_DS_LIKE) {
+ EXPECT_EQ(ds_like_txt, this->rdata_ds_like.toText());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, badText_DS_LIKE) {
+ EXPECT_THROW(const TypeParam ds_like2("99999 5 2 BEEF"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 555 2 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 22222 BEEF"),
+ InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("11111 5 2"), InvalidRdataText);
+ EXPECT_THROW(const TypeParam ds_like2("GARBAGE IN"), InvalidRdataText);
+ // no space between the digest type and the digest.
+ EXPECT_THROW(const TypeParam ds_like2(
+ "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, createFromWire_DS_LIKE) {
+ EXPECT_EQ(0, this->rdata_ds_like.compare(
+ *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass::IN(),
+ "rdata_ds_fromWire")));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, assignment_DS_LIKE) {
+ TypeParam copy((string(ds_like_txt)));
+ copy = this->rdata_ds_like;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+
+ // Check if the copied data is valid even after the original is deleted
+ TypeParam* copy2 = new TypeParam(this->rdata_ds_like);
+ TypeParam copy3((string(ds_like_txt)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(this->rdata_ds_like));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, getTag_DS_LIKE) {
+ EXPECT_EQ(12892, this->rdata_ds_like.getTag());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireRenderer) {
+ Rdata_DS_LIKE_Test<TypeParam>::renderer.skip(2);
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->renderer);
+
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_ds_fromWire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t*>
+ (this->obuffer.getData()) + 2,
+ this->obuffer.getLength() - 2,
+ &data[2], data.size() - 2);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireBuffer) {
+ TypeParam rdata_ds_like(ds_like_txt);
+ rdata_ds_like.toWire(this->obuffer);
+}
+
+string ds_like_txt1("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different tag
+string ds_like_txt2("12893 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different algorithm
+string ds_like_txt3("12892 6 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest type
+string ds_like_txt4("12892 5 3 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest
+string ds_like_txt5("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B5");
+// different digest length
+string ds_like_txt6("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+ "5F0EB5C777586DE18DA6B555");
+
+TYPED_TEST(Rdata_DS_LIKE_Test, compare) {
+ // trivial case: self equivalence
+ EXPECT_EQ(0, TypeParam(ds_like_txt).compare(TypeParam(ds_like_txt)));
+
+ // non-equivalence tests
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt2)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt2).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt3)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt3).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt4)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt4).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt5)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt5).compare(TypeParam(ds_like_txt1)), 0);
+
+ EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt6)), 0);
+ EXPECT_GT(TypeParam(ds_like_txt6).compare(TypeParam(ds_like_txt1)), 0);
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(this->rdata_ds_like.compare(*this->rdata_nomatch),
+ bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_ds_unittest.cc b/src/lib/dns/tests/rdata_ds_unittest.cc
deleted file mode 100644
index 5988620..0000000
--- a/src/lib/dns/tests/rdata_ds_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <util/buffer.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-class Rdata_DS_Test : public RdataTest {
- // there's nothing to specialize
-};
-
-string ds_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5");
-const generic::DS rdata_ds(ds_txt);
-
-TEST_F(Rdata_DS_Test, toText_DS) {
- EXPECT_EQ(ds_txt, rdata_ds.toText());
-}
-
-TEST_F(Rdata_DS_Test, badText_DS) {
- EXPECT_THROW(const generic::DS ds2("99999 5 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 555 2 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 22222 BEEF"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("11111 5 2"), InvalidRdataText);
- EXPECT_THROW(const generic::DS ds2("GARBAGE IN"), InvalidRdataText);
-}
-
-// this test currently fails; we must fix it, and then migrate the test to
-// badText_DS
-TEST_F(Rdata_DS_Test, DISABLED_badText_DS) {
- // no space between the digest type and the digest.
- EXPECT_THROW(const generic::DS ds2(
- "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
- "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
-}
-
-TEST_F(Rdata_DS_Test, createFromWire_DS) {
- EXPECT_EQ(0, rdata_ds.compare(
- *rdataFactoryFromFile(RRType::DS(), RRClass::IN(),
- "rdata_ds_fromWire")));
-}
-
-TEST_F(Rdata_DS_Test, getTag_DS) {
- EXPECT_EQ(12892, rdata_ds.getTag());
-}
-
-TEST_F(Rdata_DS_Test, toWireRenderer) {
- renderer.skip(2);
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(renderer);
-
- vector<unsigned char> data;
- UnitTestUtil::readWireData("rdata_ds_fromWire", data);
- EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
- static_cast<const uint8_t *>(obuffer.getData()) + 2,
- obuffer.getLength() - 2, &data[2], data.size() - 2);
-}
-
-TEST_F(Rdata_DS_Test, toWireBuffer) {
- generic::DS rdata_ds(ds_txt);
- rdata_ds.toWire(obuffer);
-}
-
-TEST_F(Rdata_DS_Test, compare) {
- // trivial case: self equivalence
- EXPECT_EQ(0, generic::DS(ds_txt).compare(generic::DS(ds_txt)));
-
- // TODO: need more tests
-}
-
-}
diff --git a/src/lib/dns/tests/rdata_hinfo_unittest.cc b/src/lib/dns/tests/rdata_hinfo_unittest.cc
new file mode 100644
index 0000000..c52b2a0
--- /dev/null
+++ b/src/lib/dns/tests/rdata_hinfo_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_HINFO_Test : public RdataTest {
+};
+
+static uint8_t hinfo_rdata[] = {0x07,0x50,0x65,0x6e,0x74,0x69,0x75,0x6d,0x05,
+ 0x4c,0x69,0x6e,0x75,0x78};
+static const char *hinfo_str = "\"Pentium\" \"Linux\"";
+static const char *hinfo_str1 = "\"Pen\\\"tium\" \"Linux\"";
+
+static const char *hinfo_str_small1 = "\"Lentium\" \"Linux\"";
+static const char *hinfo_str_small2 = "\"Pentium\" \"Kinux\"";
+static const char *hinfo_str_large1 = "\"Qentium\" \"Linux\"";
+static const char *hinfo_str_large2 = "\"Pentium\" \"UNIX\"";
+
+TEST_F(Rdata_HINFO_Test, createFromText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+
+ // Test the text with double quotes in the middle of string
+ HINFO hinfo1(hinfo_str1);
+ EXPECT_EQ(string("Pen\"tium"), hinfo1.getCPU());
+}
+
+TEST_F(Rdata_HINFO_Test, badText) {
+ // Fields must be separated by spaces
+ EXPECT_THROW(const HINFO hinfo("\"Pentium\"\"Linux\""), InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const HINFO hinfo("Pentium"), InvalidRdataText);
+ // The <character-string> cannot exceed 255 characters
+ string hinfo_str;
+ for (int i = 0; i < 257; ++i) {
+ hinfo_str += 'A';
+ }
+ hinfo_str += " Linux";
+ EXPECT_THROW(const HINFO hinfo(hinfo_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_HINFO_Test, createFromWire) {
+ InputBuffer input_buffer(hinfo_rdata, sizeof(hinfo_rdata));
+ HINFO hinfo(input_buffer, sizeof(hinfo_rdata));
+ EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+ EXPECT_EQ(string("Linux"), hinfo.getOS());
+}
+
+TEST_F(Rdata_HINFO_Test, toText) {
+ HINFO hinfo(hinfo_str);
+ EXPECT_EQ(hinfo_str, hinfo.toText());
+}
+
+TEST_F(Rdata_HINFO_Test, toWire) {
+ HINFO hinfo(hinfo_str);
+ hinfo.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, toWireRenderer) {
+ HINFO hinfo(hinfo_str);
+
+ hinfo.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, compare) {
+ HINFO hinfo(hinfo_str);
+ HINFO hinfo_small1(hinfo_str_small1);
+ HINFO hinfo_small2(hinfo_str_small2);
+ HINFO hinfo_large1(hinfo_str_large1);
+ HINFO hinfo_large2(hinfo_str_large2);
+
+ EXPECT_EQ(0, hinfo.compare(HINFO(hinfo_str)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small1)));
+ EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small2)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large1)));
+ EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large2)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_minfo_unittest.cc b/src/lib/dns/tests/rdata_minfo_unittest.cc
new file mode 100644
index 0000000..30c7c39
--- /dev/null
+++ b/src/lib/dns/tests/rdata_minfo_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+// minfo text
+const char* const minfo_txt = "rmailbox.example.com. emailbox.example.com.";
+const char* const minfo_txt2 = "root.example.com. emailbox.example.com.";
+const char* const too_long_label = "01234567890123456789012345678901234567"
+ "89012345678901234567890123";
+
+namespace {
+class Rdata_MINFO_Test : public RdataTest {
+public:
+ Rdata_MINFO_Test():
+ rdata_minfo(string(minfo_txt)), rdata_minfo2(string(minfo_txt2)) {}
+
+ const generic::MINFO rdata_minfo;
+ const generic::MINFO rdata_minfo2;
+};
+
+
+TEST_F(Rdata_MINFO_Test, createFromText) {
+ EXPECT_EQ(Name("rmailbox.example.com."), rdata_minfo.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo.getEmailbox());
+
+ EXPECT_EQ(Name("root.example.com."), rdata_minfo2.getRmailbox());
+ EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo2.getEmailbox());
+}
+
+TEST_F(Rdata_MINFO_Test, badText) {
+ // incomplete text
+ EXPECT_THROW(generic::MINFO("root.example.com."),
+ InvalidRdataText);
+ // number of fields (must be 2) is incorrect
+ EXPECT_THROW(generic::MINFO("root.example.com emailbox.example.com. "
+ "example.com."),
+ InvalidRdataText);
+ // bad rmailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com. emailbox.example.com." +
+ string(too_long_label)),
+ TooLongLabel);
+ // bad emailbox name
+ EXPECT_THROW(generic::MINFO("root.example.com." +
+ string(too_long_label) + " emailbox.example.com."),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_MINFO_Test, createFromWire) {
+ // uncompressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire1.wire")));
+ // compressed names
+ EXPECT_EQ(0, rdata_minfo.compare(
+ *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire2.wire", 15)));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire3.wire"),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire4.wire"),
+ InvalidRdataLength);
+ // bogus rmailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire5.wire"),
+ DNSMessageFORMERR);
+ // bogus emailbox name, the error should be detected in the name
+ // constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+ "rdata_minfo_fromWire6.wire"),
+ DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_MINFO_Test, assignment) {
+ generic::MINFO copy((string(minfo_txt2)));
+ copy = rdata_minfo;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+
+ // Check if the copied data is valid even after the original is deleted
+ generic::MINFO* copy2 = new generic::MINFO(rdata_minfo);
+ generic::MINFO copy3((string(minfo_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_minfo));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_minfo));
+}
+
+TEST_F(Rdata_MINFO_Test, toWireBuffer) {
+ rdata_minfo.toWire(obuffer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+
+ obuffer.clear();
+ rdata_minfo2.toWire(obuffer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toWireRenderer) {
+ rdata_minfo.toWire(renderer);
+ vector<unsigned char> data;
+ UnitTestUtil::readWireData("rdata_minfo_toWire1.wire", data);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data[0], data.size());
+ renderer.clear();
+ rdata_minfo2.toWire(renderer);
+ vector<unsigned char> data2;
+ UnitTestUtil::readWireData("rdata_minfo_toWire2.wire", data2);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ static_cast<const uint8_t *>(obuffer.getData()),
+ obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toText) {
+ EXPECT_EQ(minfo_txt, rdata_minfo.toText());
+ EXPECT_EQ(minfo_txt2, rdata_minfo2.toText());
+}
+
+TEST_F(Rdata_MINFO_Test, compare) {
+ // check reflexivity
+ EXPECT_EQ(0, rdata_minfo.compare(rdata_minfo));
+
+ // names must be compared in case-insensitive manner
+ EXPECT_EQ(0, rdata_minfo.compare(generic::MINFO("RMAILBOX.example.com. "
+ "emailbox.EXAMPLE.com.")));
+
+ // another MINFO whose rmailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large1_minfo("zzzzzzzz.example.com. "
+ "emailbox.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large1_minfo));
+ EXPECT_LT(0, large1_minfo.compare(rdata_minfo));
+
+ // another MINFO whose emailbox name is larger than that of rdata_minfo.
+ const generic::MINFO large2_minfo("rmailbox.example.com. "
+ "zzzzzzzzzzz.example.com.");
+ EXPECT_GT(0, rdata_minfo.compare(large2_minfo));
+ EXPECT_LT(0, large2_minfo.compare(rdata_minfo));
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_minfo.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_naptr_unittest.cc b/src/lib/dns/tests/rdata_naptr_unittest.cc
new file mode 100644
index 0000000..f905943
--- /dev/null
+++ b/src/lib/dns/tests/rdata_naptr_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_NAPTR_Test : public RdataTest {
+};
+
+// 10 100 "S" "SIP+D2U" "" _sip._udp.example.com.
+static uint8_t naptr_rdata[] = {0x00,0x0a,0x00,0x64,0x01,0x53,0x07,0x53,0x49,
+ 0x50,0x2b,0x44,0x32,0x55,0x00,0x04,0x5f,0x73,0x69,0x70,0x04,0x5f,0x75,0x64,
+ 0x70,0x07,0x65,0x78,0x61,0x6d,0x70,0x6c,0x65,0x03,0x63,0x6f,0x6d,0x00};
+
+static const char *naptr_str =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str2 =
+ "10 100 S SIP+D2U \"\" _sip._udp.example.com.";
+
+static const char *naptr_str_small1 =
+ "9 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small2 =
+ "10 90 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small3 =
+ "10 100 \"R\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small4 =
+ "10 100 \"S\" \"SIP+C2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _rip._udp.example.com.";
+
+static const char *naptr_str_large1 =
+ "11 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large2 =
+ "10 110 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large3 =
+ "10 100 \"T\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large4 =
+ "10 100 \"S\" \"SIP+E2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large5 =
+ "10 100 \"S\" \"SIP+D2U\" \"\" _tip._udp.example.com.";
+
+TEST_F(Rdata_NAPTR_Test, createFromText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+
+ // Test <character-string> fields that are separated by spaces
+ NAPTR naptr2(naptr_str2);
+ EXPECT_EQ(string("S"), naptr2.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr2.getServices());
+}
+
+TEST_F(Rdata_NAPTR_Test, badText) {
+ // Order number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("65536 10 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Preference number cannot exceed 65535
+ EXPECT_THROW(const NAPTR naptr("100 65536 S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // No regexp given
+ EXPECT_THROW(const NAPTR naptr("100 10 S SIP _sip._udp.example.com."),
+ InvalidRdataText);
+ // The double-quote separators must match
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Order or preference must not be missing
+ EXPECT_THROW(const NAPTR naptr("10 \"S\" SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Fields must be separated by spaces
+ EXPECT_THROW(const NAPTR naptr("100 10S SIP \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\"\"SIP\" \"\" _sip._udp.example.com."),
+ InvalidRdataText);
+ // Field cannot be missing
+ EXPECT_THROW(const NAPTR naptr("100 10 \"S\""), InvalidRdataText);
+
+ // The <character-string> cannot exceed 255 characters
+ string naptr_str;
+ naptr_str += "100 10 ";
+ for (int i = 0; i < 257; ++i) {
+ naptr_str += 'A';
+ }
+ naptr_str += " SIP \"\" _sip._udp.example.com.";
+ EXPECT_THROW(const NAPTR naptr(naptr_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_NAPTR_Test, createFromWire) {
+ InputBuffer input_buffer(naptr_rdata, sizeof(naptr_rdata));
+ NAPTR naptr(input_buffer, sizeof(naptr_rdata));
+ EXPECT_EQ(10, naptr.getOrder());
+ EXPECT_EQ(100, naptr.getPreference());
+ EXPECT_EQ(string("S"), naptr.getFlags());
+ EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+ EXPECT_EQ(string(""), naptr.getRegexp());
+ EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+}
+
+TEST_F(Rdata_NAPTR_Test, toWire) {
+ NAPTR naptr(naptr_str);
+ naptr.toWire(obuffer);
+
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toWireRenderer) {
+ NAPTR naptr(naptr_str);
+
+ naptr.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+ obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toText) {
+ NAPTR naptr(naptr_str);
+ EXPECT_EQ(naptr_str, naptr.toText());
+}
+
+TEST_F(Rdata_NAPTR_Test, compare) {
+ NAPTR naptr(naptr_str);
+ NAPTR naptr_small1(naptr_str_small1);
+ NAPTR naptr_small2(naptr_str_small2);
+ NAPTR naptr_small3(naptr_str_small3);
+ NAPTR naptr_small4(naptr_str_small4);
+ NAPTR naptr_small5(naptr_str_small5);
+ NAPTR naptr_large1(naptr_str_large1);
+ NAPTR naptr_large2(naptr_str_large2);
+ NAPTR naptr_large3(naptr_str_large3);
+ NAPTR naptr_large4(naptr_str_large4);
+ NAPTR naptr_large5(naptr_str_large5);
+
+ EXPECT_EQ(0, naptr.compare(NAPTR(naptr_str)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small1)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small2)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small3)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small4)));
+ EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small5)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large1)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large2)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large3)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large4)));
+ EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large5)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_rrsig_unittest.cc b/src/lib/dns/tests/rdata_rrsig_unittest.cc
index 903021f..3324b99 100644
--- a/src/lib/dns/tests/rdata_rrsig_unittest.cc
+++ b/src/lib/dns/tests/rdata_rrsig_unittest.cc
@@ -47,7 +47,7 @@ TEST_F(Rdata_RRSIG_Test, fromText) {
"f49t+sXKPzbipN9g+s1ZPiIyofc=");
generic::RRSIG rdata_rrsig(rrsig_txt);
EXPECT_EQ(rrsig_txt, rdata_rrsig.toText());
-
+ EXPECT_EQ(isc::dns::RRType::A(), rdata_rrsig.typeCovered());
}
TEST_F(Rdata_RRSIG_Test, badText) {
diff --git a/src/lib/dns/tests/rdata_srv_unittest.cc b/src/lib/dns/tests/rdata_srv_unittest.cc
new file mode 100644
index 0000000..3394f43
--- /dev/null
+++ b/src/lib/dns/tests/rdata_srv_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+class Rdata_SRV_Test : public RdataTest {
+ // there's nothing to specialize
+};
+
+string srv_txt("1 5 1500 a.example.com.");
+string srv_txt2("1 5 1400 example.com.");
+string too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123");
+
+// 1 5 1500 a.example.com.
+const uint8_t wiredata_srv[] = {
+ 0x00, 0x01, 0x00, 0x05, 0x05, 0xdc, 0x01, 0x61, 0x07, 0x65, 0x78,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+// 1 5 1400 example.com.
+const uint8_t wiredata_srv2[] = {
+ 0x00, 0x01, 0x00, 0x05, 0x05, 0x78, 0x07, 0x65, 0x78, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+
+const in::SRV rdata_srv(srv_txt);
+const in::SRV rdata_srv2(srv_txt2);
+
+TEST_F(Rdata_SRV_Test, createFromText) {
+ EXPECT_EQ(1, rdata_srv.getPriority());
+ EXPECT_EQ(5, rdata_srv.getWeight());
+ EXPECT_EQ(1500, rdata_srv.getPort());
+ EXPECT_EQ(Name("a.example.com."), rdata_srv.getTarget());
+}
+
+TEST_F(Rdata_SRV_Test, badText) {
+ // priority is too large (2814...6 is 2^48)
+ EXPECT_THROW(in::SRV("281474976710656 5 1500 a.example.com."),
+ InvalidRdataText);
+ // weight is too large
+ EXPECT_THROW(in::SRV("1 281474976710656 1500 a.example.com."),
+ InvalidRdataText);
+ // port is too large
+ EXPECT_THROW(in::SRV("1 5 281474976710656 a.example.com."),
+ InvalidRdataText);
+ // incomplete text
+ EXPECT_THROW(in::SRV("1 5 a.example.com."),
+ InvalidRdataText);
+ EXPECT_THROW(in::SRV("1 5 1500a.example.com."),
+ InvalidRdataText);
+ // bad name
+ EXPECT_THROW(in::SRV("1 5 1500 a.example.com." + too_long_label),
+ TooLongLabel);
+}
+
+TEST_F(Rdata_SRV_Test, assignment) {
+ in::SRV copy((string(srv_txt2)));
+ copy = rdata_srv;
+ EXPECT_EQ(0, copy.compare(rdata_srv));
+
+ // Check if the copied data is valid even after the original is deleted
+ in::SRV* copy2 = new in::SRV(rdata_srv);
+ in::SRV copy3((string(srv_txt2)));
+ copy3 = *copy2;
+ delete copy2;
+ EXPECT_EQ(0, copy3.compare(rdata_srv));
+
+ // Self assignment
+ copy = copy;
+ EXPECT_EQ(0, copy.compare(rdata_srv));
+}
+
+TEST_F(Rdata_SRV_Test, createFromWire) {
+ EXPECT_EQ(0, rdata_srv.compare(
+ *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire")));
+ // RDLENGTH is too short
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 23),
+ InvalidRdataLength);
+ // RDLENGTH is too long
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 46),
+ InvalidRdataLength);
+ // incomplete name. the error should be detected in the name constructor
+ EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_cname_fromWire", 69),
+ DNSMessageFORMERR);
+ // parse compressed target name
+ EXPECT_EQ(0, rdata_srv.compare(
+ *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+ "rdata_srv_fromWire", 89)));
+}
+
+TEST_F(Rdata_SRV_Test, toWireBuffer) {
+ rdata_srv.toWire(obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv, sizeof(wiredata_srv));
+ obuffer.clear();
+ rdata_srv2.toWire(obuffer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toWireRenderer) {
+ rdata_srv.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv, sizeof(wiredata_srv));
+ renderer.clear();
+ rdata_srv2.toWire(renderer);
+ EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+ obuffer.getData(), obuffer.getLength(),
+ wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toText) {
+ EXPECT_EQ(srv_txt, rdata_srv.toText());
+ EXPECT_EQ(srv_txt2, rdata_srv2.toText());
+}
+
+TEST_F(Rdata_SRV_Test, compare) {
+ // test RDATAs, sorted in ascending order.
+ vector<in::SRV> compare_set;
+ compare_set.push_back(in::SRV("1 5 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 5 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1500 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1600 a.example.com."));
+ compare_set.push_back(in::SRV("2 6 1600 example.com."));
+
+ EXPECT_EQ(0, compare_set[0].compare(
+ in::SRV("1 5 1500 a.example.com.")));
+
+ vector<in::SRV>::const_iterator it;
+ vector<in::SRV>::const_iterator it_end = compare_set.end();
+ for (it = compare_set.begin(); it != it_end - 1; ++it) {
+ EXPECT_GT(0, (*it).compare(*(it + 1)));
+ EXPECT_LT(0, (*(it + 1)).compare(*it));
+ }
+
+ // comparison attempt between incompatible RR types should be rejected
+ EXPECT_THROW(rdata_srv.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index cb1bb1c..d8f0d1c 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -5,8 +5,12 @@ BUILT_SOURCES += edns_toWire4.wire
BUILT_SOURCES += message_fromWire10.wire message_fromWire11.wire
BUILT_SOURCES += message_fromWire12.wire message_fromWire13.wire
BUILT_SOURCES += message_fromWire14.wire message_fromWire15.wire
-BUILT_SOURCES += message_fromWire16.wire
+BUILT_SOURCES += message_fromWire16.wire message_fromWire17.wire
+BUILT_SOURCES += message_fromWire18.wire message_fromWire19.wire
+BUILT_SOURCES += message_fromWire20.wire message_fromWire21.wire
+BUILT_SOURCES += message_fromWire22.wire
BUILT_SOURCES += message_toWire2.wire message_toWire3.wire
+BUILT_SOURCES += message_toWire4.wire message_toWire5.wire
BUILT_SOURCES += message_toText1.wire message_toText2.wire
BUILT_SOURCES += message_toText3.wire
BUILT_SOURCES += name_toWire5.wire name_toWire6.wire
@@ -24,10 +28,20 @@ BUILT_SOURCES += rdata_nsec3_fromWire10.wire rdata_nsec3_fromWire11.wire
BUILT_SOURCES += rdata_nsec3_fromWire12.wire rdata_nsec3_fromWire13.wire
BUILT_SOURCES += rdata_nsec3_fromWire14.wire rdata_nsec3_fromWire15.wire
BUILT_SOURCES += rdata_rrsig_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire1.wire rdata_minfo_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire3.wire rdata_minfo_fromWire4.wire
+BUILT_SOURCES += rdata_minfo_fromWire5.wire rdata_minfo_fromWire6.wire
+BUILT_SOURCES += rdata_minfo_toWire1.wire rdata_minfo_toWire2.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed1.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed2.wire
BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -47,8 +61,7 @@ BUILT_SOURCES += tsig_verify10.wire
# NOTE: keep this in sync with real file listing
# so is included in tarball
-EXTRA_DIST = gen-wiredata.py.in
-EXTRA_DIST += edns_toWire1.spec edns_toWire2.spec
+EXTRA_DIST = edns_toWire1.spec edns_toWire2.spec
EXTRA_DIST += edns_toWire3.spec edns_toWire4.spec
EXTRA_DIST += masterload.txt
EXTRA_DIST += message_fromWire1 message_fromWire2
@@ -59,7 +72,11 @@ EXTRA_DIST += message_fromWire9 message_fromWire10.spec
EXTRA_DIST += message_fromWire11.spec message_fromWire12.spec
EXTRA_DIST += message_fromWire13.spec message_fromWire14.spec
EXTRA_DIST += message_fromWire15.spec message_fromWire16.spec
+EXTRA_DIST += message_fromWire17.spec message_fromWire18.spec
+EXTRA_DIST += message_fromWire19.spec message_fromWire20.spec
+EXTRA_DIST += message_fromWire21.spec message_fromWire22.spec
EXTRA_DIST += message_toWire1 message_toWire2.spec message_toWire3.spec
+EXTRA_DIST += message_toWire4.spec message_toWire5.spec
EXTRA_DIST += message_toText1.txt message_toText1.spec
EXTRA_DIST += message_toText2.txt message_toText2.spec
EXTRA_DIST += message_toText3.txt message_toText3.spec
@@ -96,7 +113,18 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
+EXTRA_DIST += rdata_srv_fromWire
+EXTRA_DIST += rdata_minfo_fromWire1.spec rdata_minfo_fromWire2.spec
+EXTRA_DIST += rdata_minfo_fromWire3.spec rdata_minfo_fromWire4.spec
+EXTRA_DIST += rdata_minfo_fromWire5.spec rdata_minfo_fromWire6.spec
+EXTRA_DIST += rdata_minfo_toWire1.spec rdata_minfo_toWire2.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed1.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed2.spec
EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
EXTRA_DIST += rdata_txt_fromWire3.spec rdata_txt_fromWire4.spec
EXTRA_DIST += rdata_txt_fromWire5.spec rdata_unknown_fromWire
@@ -118,4 +146,4 @@ EXTRA_DIST += tsig_verify7.spec tsig_verify8.spec tsig_verify9.spec
EXTRA_DIST += tsig_verify10.spec
.spec.wire:
- ./gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/dns/tests/testdata/gen-wiredata.py.in b/src/lib/dns/tests/testdata/gen-wiredata.py.in
deleted file mode 100755
index fd98c6e..0000000
--- a/src/lib/dns/tests/testdata/gen-wiredata.py.in
+++ /dev/null
@@ -1,612 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import configparser, re, time, socket, sys
-from datetime import datetime
-from optparse import OptionParser
-
-re_hex = re.compile(r'^0x[0-9a-fA-F]+')
-re_decimal = re.compile(r'^\d+$')
-re_string = re.compile(r"\'(.*)\'$")
-
-dnssec_timefmt = '%Y%m%d%H%M%S'
-
-dict_qr = { 'query' : 0, 'response' : 1 }
-dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
- 'update' : 5 }
-rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
-dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
- 'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
- 'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
-rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
-dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
- 'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
- 'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
- 'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
- 'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
- 'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
- 'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
- 'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
- 'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
- 'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
- 'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
- 'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
- 'maila' : 254, 'any' : 255 }
-rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
-dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
-rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
- dict_rrclass.keys()])
-dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
- 'rsasha1' : 5 }
-dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
-rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
- dict_algorithm.keys()])
-rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
- dict_nsec3_algorithm.keys()])
-
-header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
- 'rcode' : dict_rcode }
-question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
-rrsig_xtables = { 'algorithm' : dict_algorithm }
-
-def parse_value(value, xtable = {}):
- if re.search(re_hex, value):
- return int(value, 16)
- if re.search(re_decimal, value):
- return int(value)
- m = re.match(re_string, value)
- if m:
- return m.group(1)
- lovalue = value.lower()
- if lovalue in xtable:
- return xtable[lovalue]
- return value
-
-def code_totext(code, dict):
- if code in dict.keys():
- return dict[code] + '(' + str(code) + ')'
- return str(code)
-
-def encode_name(name, absolute=True):
- # make sure the name is dot-terminated. duplicate dots will be ignored
- # below.
- name += '.'
- labels = name.split('.')
- wire = ''
- for l in labels:
- if len(l) > 4 and l[0:4] == 'ptr=':
- # special meta-syntax for compression pointer
- wire += '%04x' % (0xc000 | int(l[4:]))
- break
- if absolute or len(l) > 0:
- wire += '%02x' % len(l)
- wire += ''.join(['%02x' % ord(ch) for ch in l])
- if len(l) == 0:
- break
- return wire
-
-def encode_string(name, len=None):
- if type(name) is int and len is not None:
- return '%0.*x' % (len * 2, name)
- return ''.join(['%02x' % ord(ch) for ch in name])
-
-def count_namelabels(name):
- if name == '.': # special case
- return 0
- m = re.match('^(.*)\.$', name)
- if m:
- name = m.group(1)
- return len(name.split('.'))
-
-def get_config(config, section, configobj, xtables = {}):
- try:
- for field in config.options(section):
- value = config.get(section, field)
- if field in xtables.keys():
- xtable = xtables[field]
- else:
- xtable = {}
- configobj.__dict__[field] = parse_value(value, xtable)
- except configparser.NoSectionError:
- return False
- return True
-
-def print_header(f, input_file):
- f.write('''###
-### This data file was auto-generated from ''' + input_file + '''
-###
-''')
-
-class Name:
- name = 'example.com'
- pointer = None # no compression by default
- def dump(self, f):
- name = self.name
- if self.pointer is not None:
- if len(name) > 0 and name[-1] != '.':
- name += '.'
- name += 'ptr=%d' % self.pointer
- name_wire = encode_name(name)
- f.write('\n# DNS Name: %s' % self.name)
- if self.pointer is not None:
- f.write(' + compression pointer: %d' % self.pointer)
- f.write('\n')
- f.write('%s' % name_wire)
- f.write('\n')
-
-class DNSHeader:
- id = 0x1035
- (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
- mbz = 0
- rcode = 0 # noerror
- opcode = 0 # query
- (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
- def dump(self, f):
- f.write('\n# Header Section\n')
- f.write('# ID=' + str(self.id))
- f.write(' QR=' + ('Response' if self.qr else 'Query'))
- f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
- f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
- f.write('%s' % (' AA' if self.aa else ''))
- f.write('%s' % (' TC' if self.tc else ''))
- f.write('%s' % (' RD' if self.rd else ''))
- f.write('%s' % (' AD' if self.ad else ''))
- f.write('%s' % (' CD' if self.cd else ''))
- f.write('\n')
- f.write('%04x ' % self.id)
- flag_and_code = 0
- flag_and_code |= (self.qr << 15 | self.opcode << 14 | self.aa << 10 |
- self.tc << 9 | self.rd << 8 | self.ra << 7 |
- self.mbz << 6 | self.ad << 5 | self.cd << 4 |
- self.rcode)
- f.write('%04x\n' % flag_and_code)
- f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
- (self.qdcount, self.ancount, self.nscount, self.arcount))
- f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
- self.nscount, self.arcount))
-
-class DNSQuestion:
- name = 'example.com.'
- rrtype = parse_value('A', dict_rrtype)
- rrclass = parse_value('IN', dict_rrclass)
- def dump(self, f):
- f.write('\n# Question Section\n')
- f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
- (self.name,
- code_totext(self.rrtype, rdict_rrtype),
- code_totext(self.rrclass, rdict_rrclass)))
- f.write(encode_name(self.name))
- f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
-
-class EDNS:
- name = '.'
- udpsize = 4096
- extrcode = 0
- version = 0
- do = 0
- mbz = 0
- rdlen = 0
- def dump(self, f):
- f.write('\n# EDNS OPT RR\n')
- f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
- (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
- self.udpsize, self.extrcode, self.version,
- 1 if self.do else 0))
-
- code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
- extflags = (self.do << 15) | (self.mbz & 0x8000)
- f.write('%s %04x %04x %04x %04x\n' %
- (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
- code_vers, extflags))
- f.write('# RDLEN=%d\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
-
-class RR:
- '''This is a base class for various types of RR test data.
- For each RR type (A, AAAA, NS, etc), we define a derived class of RR
- to dump type specific RDATA parameters. This class defines parameters
- common to all types of RDATA, namely the owner name, RR class and TTL.
- The dump() method of derived classes are expected to call dump_header(),
- whose default implementation is provided in this class. This method
- decides whether to dump the test data as an RR (with name, type, class)
- or only as RDATA (with its length), and dumps the corresponding data
- via the specified file object.
-
- By convention we assume derived classes are named after the common
- standard mnemonic of the corresponding RR types. For example, the
- derived class for the RR type SOA should be named "SOA".
-
- Configurable parameters are as follows:
- - as_rr (bool): Whether or not the data is to be dumped as an RR. False
- by default.
- - rr_class (string): The RR class of the data. Only meaningful when the
- data is dumped as an RR. Default is 'IN'.
- - rr_ttl (integer): The TTL value of the RR. Only meaningful when the
- data is dumped as an RR. Default is 86400 (1 day).
- '''
-
- def __init__(self):
- self.as_rr = False
- # only when as_rr is True, same for class/TTL:
- self.rr_name = 'example.com'
- self.rr_class = 'IN'
- self.rr_ttl = 86400
- def dump_header(self, f, rdlen):
- type_txt = self.__class__.__name__
- type_code = parse_value(type_txt, dict_rrtype)
- if self.as_rr:
- rrclass = parse_value(self.rr_class, dict_rrclass)
- f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d RDLEN=%d)\n' %
- (type_txt, self.rr_name,
- code_totext(rrclass, rdict_rrclass), self.rr_ttl, rdlen))
- f.write('%s %04x %04x %08x %04x\n' %
- (encode_name(self.rr_name), type_code, rrclass,
- self.rr_ttl, rdlen))
- else:
- f.write('\n# %s RDATA (RDLEN=%d)\n' % (type_txt, rdlen))
- f.write('%04x\n' % rdlen)
-
-class A(RR):
- rdlen = 4 # fixed by default
- address = '192.0.2.1'
-
- def dump(self, f):
- self.dump_header(f, self.rdlen)
- f.write('# Address=%s\n' % (self.address))
- bin_address = socket.inet_aton(self.address)
- f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
- bin_address[2], bin_address[3]))
-
-class NS(RR):
- rdlen = None # auto calculate
- nsname = 'ns.example.com'
-
- def dump(self, f):
- nsname_wire = encode_name(self.nsname)
- if self.rdlen is None:
- self.rdlen = len(nsname_wire) / 2
- self.dump_header(f, self.rdlen)
- f.write('# NS name=%s\n' % (self.nsname))
- f.write('%s\n' % nsname_wire)
-
-class SOA(RR):
- rdlen = None # auto-calculate
- mname = 'ns.example.com'
- rname = 'root.example.com'
- serial = 2010012601
- refresh = 3600
- retry = 300
- expire = 3600000
- minimum = 1200
- def dump(self, f):
- mname_wire = encode_name(self.mname)
- rname_wire = encode_name(self.rname)
- if self.rdlen is None:
- self.rdlen = int(20 + len(mname_wire) / 2 + len(str(rname_wire)) / 2)
- self.dump_header(f, self.rdlen)
- f.write('# NNAME=%s RNAME=%s\n' % (self.mname, self.rname))
- f.write('%s %s\n' % (mname_wire, rname_wire))
- f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
- (self.serial, self.refresh, self.retry, self.expire,
- self.minimum))
- f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
- self.retry, self.expire,
- self.minimum))
-
-class TXT:
- rdlen = -1 # auto-calculate
- nstring = 1 # number of character-strings
- stringlen = -1 # default string length, auto-calculate
- string = 'Test String' # default string
- def dump(self, f):
- stringlen_list = []
- string_list = []
- wirestring_list = []
- for i in range(0, self.nstring):
- key_string = 'string' + str(i)
- if key_string in self.__dict__:
- string_list.append(self.__dict__[key_string])
- else:
- string_list.append(self.string)
- wirestring_list.append(encode_string(string_list[-1]))
- key_stringlen = 'stringlen' + str(i)
- if key_stringlen in self.__dict__:
- stringlen_list.append(self.__dict__[key_stringlen])
- else:
- stringlen_list.append(self.stringlen)
- if stringlen_list[-1] < 0:
- stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
- rdlen = self.rdlen
- if rdlen < 0:
- rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
- f.write('\n# TXT RDATA (RDLEN=%d)\n' % rdlen)
- f.write('%04x\n' % rdlen);
- for i in range(0, self.nstring):
- f.write('# String Len=%d, String=\"%s\"\n' %
- (stringlen_list[i], string_list[i]))
- f.write('%02x%s%s\n' % (stringlen_list[i],
- ' ' if len(wirestring_list[i]) > 0 else '',
- wirestring_list[i]))
-
-class RP:
- '''Implements rendering RP RDATA in the wire format.
- Configurable parameters are as follows:
- - rdlen: 16-bit RDATA length. If omitted, the accurate value is auto
- calculated and used; if negative, the RDLEN field will be omitted from
- the output data.
- - mailbox: The mailbox field.
- - text: The text field.
- All of these parameters have the default values and can be omitted.
- '''
- rdlen = None # auto-calculate
- mailbox = 'root.example.com'
- text = 'rp-text.example.com'
- def dump(self, f):
- mailbox_wire = encode_name(self.mailbox)
- text_wire = encode_name(self.text)
- if self.rdlen is None:
- self.rdlen = (len(mailbox_wire) + len(text_wire)) / 2
- else:
- self.rdlen = int(self.rdlen)
- if self.rdlen >= 0:
- f.write('\n# RP RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
- else:
- f.write('\n# RP RDATA (RDLEN omitted)\n')
- f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
- f.write('%s %s\n' % (mailbox_wire, text_wire))
-
-class NSECBASE:
- '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
- these RRs. The NSEC and NSEC3 classes will be inherited from this
- class.'''
- nbitmap = 1 # number of bitmaps
- block = 0
- maplen = None # default bitmap length, auto-calculate
- bitmap = '040000000003' # an arbtrarily chosen bitmap sample
- def dump(self, f):
- # first, construct the bitmpa data
- block_list = []
- maplen_list = []
- bitmap_list = []
- for i in range(0, self.nbitmap):
- key_bitmap = 'bitmap' + str(i)
- if key_bitmap in self.__dict__:
- bitmap_list.append(self.__dict__[key_bitmap])
- else:
- bitmap_list.append(self.bitmap)
- key_maplen = 'maplen' + str(i)
- if key_maplen in self.__dict__:
- maplen_list.append(self.__dict__[key_maplen])
- else:
- maplen_list.append(self.maplen)
- if maplen_list[-1] is None: # calculate it if not specified
- maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
- key_block = 'block' + str(i)
- if key_block in self.__dict__:
- block_list.append(self.__dict__[key_block])
- else:
- block_list.append(self.block)
-
- # dump RR-type specific part (NSEC or NSEC3)
- self.dump_fixedpart(f, 2 * self.nbitmap + \
- int(len(''.join(bitmap_list)) / 2))
-
- # dump the bitmap
- for i in range(0, self.nbitmap):
- f.write('# Bitmap: Block=%d, Length=%d\n' %
- (block_list[i], maplen_list[i]))
- f.write('%02x %02x %s\n' %
- (block_list[i], maplen_list[i], bitmap_list[i]))
-
-class NSEC(NSECBASE):
- rdlen = None # auto-calculate
- nextname = 'next.example.com'
- def dump_fixedpart(self, f, bitmap_totallen):
- name_wire = encode_name(self.nextname)
- if self.rdlen is None:
- # if rdlen needs to be calculated, it must be based on the bitmap
- # length, because the configured maplen can be fake.
- self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
- f.write('\n# NSEC RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen);
- f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
- int(len(name_wire) / 2)))
- f.write('%s\n' % name_wire)
-
-class NSEC3(NSECBASE):
- rdlen = None # auto-calculate
- hashalg = 1 # SHA-1
- optout = False # opt-out flag
- mbz = 0 # other flag fields (none defined yet)
- iterations = 1
- saltlen = 5
- salt = 's' * saltlen
- hashlen = 20
- hash = 'h' * hashlen
- def dump_fixedpart(self, f, bitmap_totallen):
- if self.rdlen is None:
- # if rdlen needs to be calculated, it must be based on the bitmap
- # length, because the configured maplen can be fake.
- self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
- + bitmap_totallen
- f.write('\n# NSEC3 RDATA (RDLEN=%d)\n' % self.rdlen)
- f.write('%04x\n' % self.rdlen)
- optout_val = 1 if self.optout else 0
- f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
- (code_totext(self.hashalg, rdict_nsec3_algorithm),
- optout_val, self.mbz, self.iterations))
- f.write('%02x %02x %04x\n' %
- (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
- f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
- f.write('%02x%s%s\n' % (self.saltlen,
- ' ' if len(self.salt) > 0 else '',
- encode_string(self.salt)))
- f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
- f.write('%02x%s%s\n' % (self.hashlen,
- ' ' if len(self.hash) > 0 else '',
- encode_string(self.hash)))
-
-class RRSIG:
- rdlen = -1 # auto-calculate
- covered = 1 # A
- algorithm = 5 # RSA-SHA1
- labels = -1 # auto-calculate (#labels of signer)
- originalttl = 3600
- expiration = int(time.mktime(datetime.strptime('20100131120000',
- dnssec_timefmt).timetuple()))
- inception = int(time.mktime(datetime.strptime('20100101120000',
- dnssec_timefmt).timetuple()))
- tag = 0x1035
- signer = 'example.com'
- signature = 0x123456789abcdef123456789abcdef
- def dump(self, f):
- name_wire = encode_name(self.signer)
- sig_wire = '%x' % self.signature
- rdlen = self.rdlen
- if rdlen < 0:
- rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
- labels = self.labels
- if labels < 0:
- labels = count_namelabels(self.signer)
- f.write('\n# RRSIG RDATA (RDLEN=%d)\n' % rdlen)
- f.write('%04x\n' % rdlen);
- f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
- (code_totext(self.covered, rdict_rrtype),
- code_totext(self.algorithm, rdict_algorithm), labels,
- self.originalttl))
- f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
- labels, self.originalttl))
- f.write('# Expiration=%s, Inception=%s\n' %
- (str(self.expiration), str(self.inception)))
- f.write('%08x %08x\n' % (self.expiration, self.inception))
- f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
- f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
-
-class TSIG(RR):
- rdlen = None # auto-calculate
- algorithm = 'hmac-sha256'
- time_signed = 1286978795 # arbitrarily chosen default
- fudge = 300
- mac_size = None # use a common value for the algorithm
- mac = None # use 'x' * mac_size
- original_id = 2845 # arbitrarily chosen default
- error = 0
- other_len = None # 6 if error is BADTIME; otherwise 0
- other_data = None # use time_signed + fudge + 1 for BADTIME
- dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
-
- # TSIG has some special defaults
- def __init__(self):
- super().__init__()
- self.rr_class = 'ANY'
- self.rr_ttl = 0
-
- def dump(self, f):
- if str(self.algorithm) == 'hmac-md5':
- name_wire = encode_name('hmac-md5.sig-alg.reg.int')
- else:
- name_wire = encode_name(self.algorithm)
- mac_size = self.mac_size
- if mac_size is None:
- if self.algorithm in self.dict_macsize.keys():
- mac_size = self.dict_macsize[self.algorithm]
- else:
- raise RuntimeError('TSIG Mac Size cannot be determined')
- mac = encode_string('x' * mac_size) if self.mac is None else \
- encode_string(self.mac, mac_size)
- other_len = self.other_len
- if other_len is None:
- # 18 = BADTIME
- other_len = 6 if self.error == 18 else 0
- other_data = self.other_data
- if other_data is None:
- other_data = '%012x' % (self.time_signed + self.fudge + 1) \
- if self.error == 18 else ''
- else:
- other_data = encode_string(self.other_data, other_len)
- if self.rdlen is None:
- self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
- len(other_data) / 2)
- self.dump_header(f, self.rdlen)
- f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
- (self.algorithm, self.time_signed, self.fudge))
- f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
- f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
- f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
- f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
- f.write('%04x %04x\n' % (self.original_id, self.error))
- f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
- f.write('%04x%s\n' % (other_len,
- ' ' + other_data if len(other_data) > 0 else ''))
-
-def get_config_param(section):
- config_param = {'name' : (Name, {}),
- 'header' : (DNSHeader, header_xtables),
- 'question' : (DNSQuestion, question_xtables),
- 'edns' : (EDNS, {}), 'a' : (A, {}), 'ns' : (NS, {}),
- 'soa' : (SOA, {}), 'txt' : (TXT, {}),
- 'rp' : (RP, {}), 'rrsig' : (RRSIG, {}),
- 'nsec' : (NSEC, {}), 'nsec3' : (NSEC3, {}),
- 'tsig' : (TSIG, {}) }
- s = section
- m = re.match('^([^:]+)/\d+$', section)
- if m:
- s = m.group(1)
- return config_param[s]
-
-usage = '''usage: %prog [options] input_file'''
-
-if __name__ == "__main__":
- parser = OptionParser(usage=usage)
- parser.add_option('-o', '--output', action='store', dest='output',
- default=None, metavar='FILE',
- help='output file name [default: prefix of input_file]')
- (options, args) = parser.parse_args()
-
- if len(args) == 0:
- parser.error('input file is missing')
- configfile = args[0]
-
- outputfile = options.output
- if not outputfile:
- m = re.match('(.*)\.[^.]+$', configfile)
- if m:
- outputfile = m.group(1)
- else:
- raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
-
- config = configparser.SafeConfigParser()
- config.read(configfile)
-
- output = open(outputfile, 'w')
-
- print_header(output, configfile)
-
- # First try the 'custom' mode; if it fails assume the standard mode.
- try:
- sections = config.get('custom', 'sections').split(':')
- except configparser.NoSectionError:
- sections = ['header', 'question', 'edns']
-
- for s in sections:
- section_param = get_config_param(s)
- (obj, xtables) = (section_param[0](), section_param[1])
- if get_config(config, s, obj, xtables):
- obj.dump(output)
-
- output.close()
diff --git a/src/lib/dns/tests/testdata/message_fromWire17.spec b/src/lib/dns/tests/testdata/message_fromWire17.spec
new file mode 100644
index 0000000..366cf05
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire17.spec
@@ -0,0 +1,22 @@
+#
+# A simple DNS query message signed with TSIG
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0x22c2
+rd: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e179212
+mac_size: 16
+mac: 0x8214b04634e32323d651ac60b08e6388
+original_id: 0x22c2
diff --git a/src/lib/dns/tests/testdata/message_fromWire18.spec b/src/lib/dns/tests/testdata/message_fromWire18.spec
new file mode 100644
index 0000000..0b2592a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire18.spec
@@ -0,0 +1,23 @@
+#
+# Another simple DNS query message with TSIG signed. Only ID and time signed
+# (and MAC as a result) are different.
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0xd6e2
+rd: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e17b38d
+mac_size: 16
+mac: 0x903b5b194a799b03a37718820c2404f2
+original_id: 0xd6e2
diff --git a/src/lib/dns/tests/testdata/message_fromWire19.spec b/src/lib/dns/tests/testdata/message_fromWire19.spec
new file mode 100644
index 0000000..8212dbf
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire19.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# answer section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+ancount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire20.spec b/src/lib/dns/tests/testdata/message_fromWire20.spec
new file mode 100644
index 0000000..91986e4
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire20.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# authority section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+nscount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire21.spec b/src/lib/dns/tests/testdata/message_fromWire21.spec
new file mode 100644
index 0000000..cd6aac9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire21.spec
@@ -0,0 +1,20 @@
+#
+# A non realistic DNS response message containing mixed types of RRs in the
+# additional section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+arcount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire22.spec b/src/lib/dns/tests/testdata/message_fromWire22.spec
new file mode 100644
index 0000000..a52523b
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire22.spec
@@ -0,0 +1,14 @@
+#
+# A simple DNS message containing one SOA RR in the answer section. This is
+# intended to be trimmed to emulate a bogus message.
+#
+
+[custom]
+sections: header:question:soa
+[header]
+qr: 1
+ancount: 1
+[question]
+rrtype: SOA
+[soa]
+as_rr: True
diff --git a/src/lib/dns/tests/testdata/message_toWire4.spec b/src/lib/dns/tests/testdata/message_toWire4.spec
new file mode 100644
index 0000000..aab7e10
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_toWire4.spec
@@ -0,0 +1,27 @@
+#
+# Truncated DNS response signed with TSIG
+# This is expected to be a response to "fromWire17"
+#
+
+[custom]
+sections: header:question:tsig
+[header]
+id: 0x22c2
+rd: 1
+qr: 1
+aa: 1
+# It's "truncated":
+tc: 1
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e179212
+mac_size: 16
+mac: 0x88adc3811d1d6bec7c684438906fc694
+original_id: 0x22c2
diff --git a/src/lib/dns/tests/testdata/message_toWire5.spec b/src/lib/dns/tests/testdata/message_toWire5.spec
new file mode 100644
index 0000000..e97fb43
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_toWire5.spec
@@ -0,0 +1,36 @@
+#
+# The longest possible (without EDNS) DNS response with TSIG, i.e. total
+# length should be 512 bytes.
+#
+
+[custom]
+sections: header:question:txt/1:txt/2:tsig
+[header]
+id: 0xd6e2
+rd: 1
+qr: 1
+aa: 1
+ancount: 2
+arcount: 1
+[question]
+name: www.example.com
+rrtype: TXT
+[txt/1]
+as_rr: True
+# QNAME is fully compressed
+rr_name: ptr=12
+string: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde
+[txt/2]
+as_rr: True
+# QNAME is fully compressed
+rr_name: ptr=12
+string: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0
+[tsig]
+as_rr: True
+# TSIG QNAME won't be compressed
+rr_name: www.example.com
+algorithm: hmac-md5
+time_signed: 0x4e17b38d
+mac_size: 16
+mac: 0xbe2ba477373d2496891e2fda240ee4ec
+original_id: 0xd6e2
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
new file mode 100644
index 0000000..2c43db0
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: minfo
+[minfo]
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
new file mode 100644
index 0000000..d781cac
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
@@ -0,0 +1,7 @@
+[custom]
+sections: name:minfo
+[name]
+name: a.example.com.
+[minfo]
+rmailbox: rmailbox.ptr=02
+emailbox: emailbox.ptr=02
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
new file mode 100644
index 0000000..a1d4b76
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too short
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
new file mode 100644
index 0000000..269a6ce
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too long
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
new file mode 100644
index 0000000..3a888e3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus rmailbox name
+[minfo]
+rmailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
new file mode 100644
index 0000000..c75ed8e
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus emailbox name
+[minfo]
+emailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
new file mode 100644
index 0000000..7b340a3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+[minfo]
+emailbox: emailbox.ptr=09
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
new file mode 100644
index 0000000..132f118
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+emailbox: emailbox.ptr=05
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
new file mode 100644
index 0000000..d99a381
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
@@ -0,0 +1,7 @@
+#
+# The simplest form of MINFO: all default parameters
+#
+[custom]
+sections: minfo
+[minfo]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
new file mode 100644
index 0000000..0f78fcc
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
@@ -0,0 +1,8 @@
+#
+# A simple form of MINFO: custom rmailbox and default emailbox
+#
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_srv_fromWire b/src/lib/dns/tests/testdata/rdata_srv_fromWire
new file mode 100644
index 0000000..dac87e9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_srv_fromWire
@@ -0,0 +1,36 @@
+#
+# various kinds of SRV RDATA stored in an input buffer
+#
+# RDLENGTH=21 bytes
+# 0 1
+ 00 15
+# 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 8 9 20 1 2(bytes)
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# short length
+# 3 4
+ 00 12
+# 5 6 7 8 9 30 1 2 3 4 5 6 7 8 9 40 1 2 3 4 5
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# length too long
+# 6 7
+ 00 19
+#
+# 8 9 50 1 2 3 4 5 6 7 8 9 60 1 2 3 4 5 6 7 8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+#
+# incomplete target name
+# 9 70
+ 00 12
+# 1 2 3 4 5 6 7 8 9 80 1 2 3 4 5 6 7 8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63
+#
+#
+# Valid compressed target name: 'a' + pointer
+# 9 90
+ 00 0a
+#
+# 1 2 3 4 5 6 7 8 9 100
+ 00 01 00 05 05 dc 01 61 c0 0a
diff --git a/src/lib/dns/tests/tsig_unittest.cc b/src/lib/dns/tests/tsig_unittest.cc
index ba17e70..7944b29 100644
--- a/src/lib/dns/tests/tsig_unittest.cc
+++ b/src/lib/dns/tests/tsig_unittest.cc
@@ -440,7 +440,7 @@ TEST_F(TSIGTest, signUsingHMACSHA224) {
0xef, 0x33, 0xa2, 0xda, 0xa1, 0x48, 0x71, 0xd3
};
{
- SCOPED_TRACE("Sign test using HMAC-SHA1");
+ SCOPED_TRACE("Sign test using HMAC-SHA224");
commonSignChecks(createMessageAndSign(sha1_qid, test_name, &sha1_ctx),
sha1_qid, 0x4dae7d5f, expected_mac,
sizeof(expected_mac), 0, 0, NULL,
@@ -927,4 +927,76 @@ TEST_F(TSIGTest, tooShortMAC) {
}
}
+TEST_F(TSIGTest, getTSIGLength) {
+ // Check for the most common case with various algorithms
+ // See the comment in TSIGContext::getTSIGLength() for calculation and
+ // parameter notation.
+ // The key name (www.example.com) is the same for most cases, where n1=17
+
+ // hmac-md5.sig-alg.reg.int.: n2=26, x=16
+ EXPECT_EQ(85, tsig_ctx->getTSIGLength());
+
+ // hmac-sha1: n2=11, x=20
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACSHA1_NAME(),
+ &dummy_data[0], 20)));
+ EXPECT_EQ(74, tsig_ctx->getTSIGLength());
+
+ // hmac-sha256: n2=13, x=32
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA256_NAME(),
+ &dummy_data[0], 32)));
+ EXPECT_EQ(88, tsig_ctx->getTSIGLength());
+
+ // hmac-sha224: n2=13, x=28
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA224_NAME(),
+ &dummy_data[0], 28)));
+ EXPECT_EQ(84, tsig_ctx->getTSIGLength());
+
+ // hmac-sha384: n2=13, x=48
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA384_NAME(),
+ &dummy_data[0], 48)));
+ EXPECT_EQ(104, tsig_ctx->getTSIGLength());
+
+ // hmac-sha512: n2=13, x=64
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name,
+ TSIGKey::HMACSHA512_NAME(),
+ &dummy_data[0], 64)));
+ EXPECT_EQ(120, tsig_ctx->getTSIGLength());
+
+ // bad key case: n1=len(badkey.example.com)=20, n2=26, x=0
+ tsig_ctx.reset(new TSIGContext(badkey_name, TSIGKey::HMACMD5_NAME(),
+ keyring));
+ EXPECT_EQ(72, tsig_ctx->getTSIGLength());
+
+ // bad sig case: n1=17, n2=26, x=0
+ isc::util::detail::gettimeFunction = testGetTime<0x4da8877a>;
+ createMessageFromFile("message_toWire2.wire");
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACMD5_NAME(),
+ &dummy_data[0],
+ dummy_data.size())));
+ {
+ SCOPED_TRACE("Verify resulting in BADSIG");
+ commonVerifyChecks(*tsig_ctx, message.getTSIGRecord(),
+ &received_data[0], received_data.size(),
+ TSIGError::BAD_SIG(), TSIGContext::RECEIVED_REQUEST);
+ }
+ EXPECT_EQ(69, tsig_ctx->getTSIGLength());
+
+ // bad time case: n1=17, n2=26, x=16, y=6
+ isc::util::detail::gettimeFunction = testGetTime<0x4da8877a - 1000>;
+ tsig_ctx.reset(new TSIGContext(TSIGKey(test_name, TSIGKey::HMACMD5_NAME(),
+ &dummy_data[0],
+ dummy_data.size())));
+ {
+ SCOPED_TRACE("Verify resulting in BADTIME");
+ commonVerifyChecks(*tsig_ctx, message.getTSIGRecord(),
+ &received_data[0], received_data.size(),
+ TSIGError::BAD_TIME(),
+ TSIGContext::RECEIVED_REQUEST);
+ }
+ EXPECT_EQ(91, tsig_ctx->getTSIGLength());
+}
+
} // end namespace
diff --git a/src/lib/dns/tsig.cc b/src/lib/dns/tsig.cc
index 714b2a5..1bda021 100644
--- a/src/lib/dns/tsig.cc
+++ b/src/lib/dns/tsig.cc
@@ -58,10 +58,32 @@ getTSIGTime() {
}
struct TSIGContext::TSIGContextImpl {
- TSIGContextImpl(const TSIGKey& key) :
- state_(INIT), key_(key), error_(Rcode::NOERROR()),
- previous_timesigned_(0)
- {}
+ TSIGContextImpl(const TSIGKey& key,
+ TSIGError error = TSIGError::NOERROR()) :
+ state_(INIT), key_(key), error_(error),
+ previous_timesigned_(0), digest_len_(0)
+ {
+ if (error == TSIGError::NOERROR()) {
+ // In normal (NOERROR) case, the key should be valid, and we
+ // should be able to pre-create a corresponding HMAC object,
+ // which will be likely to be used for sign or verify later.
+ // We do this in the constructor so that we can know the expected
+ // digest length in advance. The creation should normally succeed,
+ // but the key information could be still broken, which could
+ // trigger an exception inside the cryptolink module. We ignore
+ // it at this moment; a subsequent sign/verify operation will try
+ // to create the HMAC, which would also fail.
+ try {
+ hmac_.reset(CryptoLink::getCryptoLink().createHMAC(
+ key_.getSecret(), key_.getSecretLength(),
+ key_.getAlgorithm()),
+ deleteHMAC);
+ } catch (const Exception&) {
+ return;
+ }
+ digest_len_ = hmac_->getOutputLength();
+ }
+ }
// This helper method is used from verify(). It's expected to be called
// just before verify() returns. It updates internal state based on
@@ -85,6 +107,23 @@ struct TSIGContext::TSIGContextImpl {
return (error);
}
+ // A shortcut method to create an HMAC object for sign/verify. If one
+ // has been successfully created in the constructor, return it; otherwise
+ // create a new one and return it. In the former case, the ownership is
+ // transferred to the caller; the stored HMAC will be reset after the
+ // call.
+ HMACPtr createHMAC() {
+ if (hmac_) {
+ HMACPtr ret = HMACPtr();
+ ret.swap(hmac_);
+ return (ret);
+ }
+ return (HMACPtr(CryptoLink::getCryptoLink().createHMAC(
+ key_.getSecret(), key_.getSecretLength(),
+ key_.getAlgorithm()),
+ deleteHMAC));
+ }
+
// The following three are helper methods to compute the digest for
// TSIG sign/verify in order to unify the common code logic for sign()
// and verify() and to keep these callers concise.
@@ -111,6 +150,8 @@ struct TSIGContext::TSIGContextImpl {
vector<uint8_t> previous_digest_;
TSIGError error_;
uint64_t previous_timesigned_; // only meaningful for response with BADTIME
+ size_t digest_len_;
+ HMACPtr hmac_;
};
void
@@ -221,8 +262,7 @@ TSIGContext::TSIGContext(const Name& key_name, const Name& algorithm_name,
// be used in subsequent response with a TSIG indicating a BADKEY
// error.
impl_ = new TSIGContextImpl(TSIGKey(key_name, algorithm_name,
- NULL, 0));
- impl_->error_ = TSIGError::BAD_KEY();
+ NULL, 0), TSIGError::BAD_KEY());
} else {
impl_ = new TSIGContextImpl(*result.key);
}
@@ -232,6 +272,45 @@ TSIGContext::~TSIGContext() {
delete impl_;
}
+size_t
+TSIGContext::getTSIGLength() const {
+ //
+ // The space required for an TSIG record is:
+ //
+ // n1 bytes for the (key) name
+ // 2 bytes for the type
+ // 2 bytes for the class
+ // 4 bytes for the ttl
+ // 2 bytes for the rdlength
+ // n2 bytes for the algorithm name
+ // 6 bytes for the time signed
+ // 2 bytes for the fudge
+ // 2 bytes for the MAC size
+ // x bytes for the MAC
+ // 2 bytes for the original id
+ // 2 bytes for the error
+ // 2 bytes for the other data length
+ // y bytes for the other data (at most)
+ // ---------------------------------
+ // 26 + n1 + n2 + x + y bytes
+ //
+
+ // Normally the digest length ("x") is the length of the underlying
+ // hash output. If a key related error occurred, however, the
+ // corresponding TSIG will be "unsigned", and the digest length will be 0.
+ const size_t digest_len =
+ (impl_->error_ == TSIGError::BAD_KEY() ||
+ impl_->error_ == TSIGError::BAD_SIG()) ? 0 : impl_->digest_len_;
+
+ // Other Len ("y") is normally 0; if BAD_TIME error occurred, the
+ // subsequent TSIG will contain 48 bits of the server current time.
+ const size_t other_len = (impl_->error_ == TSIGError::BAD_TIME()) ? 6 : 0;
+
+ return (26 + impl_->key_.getKeyName().getLength() +
+ impl_->key_.getAlgorithmName().getLength() +
+ digest_len + other_len);
+}
+
TSIGContext::State
TSIGContext::getState() const {
return (impl_->state_);
@@ -276,11 +355,7 @@ TSIGContext::sign(const uint16_t qid, const void* const data,
return (tsig);
}
- HMACPtr hmac(CryptoLink::getCryptoLink().createHMAC(
- impl_->key_.getSecret(),
- impl_->key_.getSecretLength(),
- impl_->key_.getAlgorithm()),
- deleteHMAC);
+ HMACPtr hmac(impl_->createHMAC());
// If the context has previous MAC (either the Request MAC or its own
// previous MAC), digest it.
@@ -406,11 +481,7 @@ TSIGContext::verify(const TSIGRecord* const record, const void* const data,
return (impl_->postVerifyUpdate(error, NULL, 0));
}
- HMACPtr hmac(CryptoLink::getCryptoLink().createHMAC(
- impl_->key_.getSecret(),
- impl_->key_.getSecretLength(),
- impl_->key_.getAlgorithm()),
- deleteHMAC);
+ HMACPtr hmac(impl_->createHMAC());
// If the context has previous MAC (either the Request MAC or its own
// previous MAC), digest it.
diff --git a/src/lib/dns/tsig.h b/src/lib/dns/tsig.h
index bceec25..028d295 100644
--- a/src/lib/dns/tsig.h
+++ b/src/lib/dns/tsig.h
@@ -353,6 +353,27 @@ public:
TSIGError verify(const TSIGRecord* const record, const void* const data,
const size_t data_len);
+ /// Return the expected length of TSIG RR after \c sign()
+ ///
+ /// This method returns the length of the TSIG RR that would be
+ /// produced as a result of \c sign() with the state of the context
+ /// at the time of the call. The expected length can be decided
+ /// from the key and the algorithm (which determines the MAC size if
+ /// included) and the recorded TSIG error. Specifically, if a key
+ /// related error has been identified, the MAC will be excluded; if
+ /// a time error has occurred, the TSIG will include "other data".
+ ///
+ /// This method is provided mainly for the convenience of the Message
+ /// class, which needs to know the expected TSIG length in rendering a
+ /// signed DNS message so that it can handle truncated messages with TSIG
+ /// correctly. Normal applications wouldn't need this method. The Python
+ /// binding for this method won't be provided for the same reason.
+ ///
+ /// \exception None
+ ///
+    /// \return The expected TSIG RR length in bytes
+ size_t getTSIGLength() const;
+
/// Return the current state of the context
///
/// \note
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index d0f1d74..433bb7d 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -137,6 +137,18 @@ public:
};
///
+/// \brief A generic exception that is thrown when a function is
+/// not implemented.
+///
+/// This may be due to unfinished implementation or in case the
+/// function isn't even planned to be provided for that situation.
+class NotImplemented : public Exception {
+public:
+ NotImplemented(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+///
/// A shortcut macro to insert known values into exception arguments.
///
/// It allows the \c stream argument to be part of a statement using an
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 63b1dfb..9f52724 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -20,6 +20,7 @@ liblog_la_SOURCES += logger_manager_impl.cc logger_manager_impl.h
liblog_la_SOURCES += logger_name.cc logger_name.h
liblog_la_SOURCES += logger_specification.h
liblog_la_SOURCES += logger_support.cc logger_support.h
+liblog_la_SOURCES += logger_unittest_support.cc logger_unittest_support.h
liblog_la_SOURCES += macros.h
liblog_la_SOURCES += log_messages.cc log_messages.h
liblog_la_SOURCES += message_dictionary.cc message_dictionary.h
diff --git a/src/lib/log/README b/src/lib/log/README
index d854dce..3747cb1 100644
--- a/src/lib/log/README
+++ b/src/lib/log/README
@@ -142,13 +142,19 @@ Points to note:
the error originated from the logging library and the "WRITE_ERROR"
indicates that there was a problem in a write operation.
- * The replacement tokens are the strings "%1", "%2" etc. When a message
- is logged, these are replaced with the arguments passed to the logging
- call: %1 refers to the first argument, %2 to the second etc. Within the
- message text, the placeholders can appear in any order and placeholders
- can be repeated.
-
-* Remaining lines indicate an explanation for the preceding message. These
+ * The rest of the line - from the first non-space character to the
+    last non-space character - is taken exactly for the text
+ of the message. There are no restrictions on what characters may
+ be in this text, other than they be printable. (This means that
+ both single-quote (') and double-quote (") characters are allowed.)
+ The message text may include replacement tokens (the strings "%1",
+ "%2" etc.). When a message is logged, these are replaced with the
+ arguments passed to the logging call: %1 refers to the first argument,
+ %2 to the second etc. Within the message text, the placeholders
+ can appear in any order and placeholders can be repeated. Otherwise,
+ the message is printed unmodified.
+
+* Remaining lines indicate an explanation for the preceding message. These
are intended to be processed by a separate program and used to generate
an error messages manual. They are ignored by the message compiler.
@@ -232,8 +238,8 @@ Using the Logging - C++
=======================
1. Build message header file and source file as describe above.
-2. The main program unit should include a call to isc::log::initLogger()
- (defined in logger_support.h) to set the logging severity, debug log
+2. The main program unit must include a call to isc::log::initLogger()
+ (described in more detail below) to set the logging severity, debug log
level, and external message file:
a) The logging severity is one of the enum defined in logger.h, i.e.
@@ -279,9 +285,9 @@ Using the Logging - Python
==========================
1. Build message module as describe above.
-2. The main program unit should include a call to isc.log.init() to
- set the to set the logging severity, debug log level, and external
- message file:
+2. The main program unit must include a call to isc.log.init()
+   (described in more detail below) to set the logging
+ severity, debug log level, and external message file:
a) The logging severity is one of the strings:
@@ -316,6 +322,91 @@ Using the Logging - Python
logger.error(LOG_WRITE_ERROR, "output.txt");
+Logging Initialization
+======================
+In all cases, if an attempt is made to use a logging method before the logging
+has been initialized, the program will terminate with a LoggingNotInitialized
+exception.
+
+C++
+---
+Logging Initialization is carried out by calling initLogger(). There are two
+variants to the call, one for use by production programs and one for use by
+unit tests.
+
+Variant #1, Used by Production Programs
+---------------------------------------
+void isc::log::initLogger(const std::string& root,
+ isc::log::Severity severity = isc::log::INFO,
+ int dbglevel = 0, const char* file = NULL);
+
+This is the call that should be used by production programs:
+
+root
+Name of the program (e.g. "b10-auth"). This is also the name of the root
+logger and is used when configuring logging.
+
+severity
+Default severity that the program will start logging with. Although this may
+be overridden when the program obtains its configuration from the configuration
+database, this is the severity that it used until then. (This may be set by
+a command-line parameter.)
+
+dbglevel
+The debug level used if "severity" is set to isc::log::DEBUG.
+
+file
+The name of a local message file. This will be read and its definitions used
+to replace the compiled-in text of the messages.
+
+
+Variant #2, Used by Unit Tests
+------------------------------
+ void isc::log::initLogger()
+
+This is the call that should be used by unit tests. In this variant, all the
+options are supplied by environment variables. (It should not be used for
+production programs to avoid the chance that the program operation is affected
+by inadvertently-defined environment variables.)
+
+The environment variables are:
+
+B10_LOGGER_ROOT
+Sets the "root" for the unit test. If not defined, the name "bind10" is used.
+
+B10_LOGGER_SEVERITY
+The severity to set for the root logger in the unit test. Valid values are
+"DEBUG", "INFO", "WARN", "ERROR", "FATAL" and "NONE". If not defined, "INFO"
+is used.
+
+B10_LOGGER_DBGLEVEL
+If B10_LOGGER_SEVERITY is set to "DEBUG", the debug level. This can be a
+number between 0 and 99, and defaults to 0.
+
+B10_LOGGER_LOCALMSG
+If defined, points to a local message file. The default is not to use a local
+message file.
+
+B10_LOGGER_DESTINATION
+The location to which log messages are written. This can be one of:
+
+ stdout Message are written to stdout
+ stderr Messages are written to stderr
+ syslog[:facility] Messages are written to syslog. If the optional
+ "facility" is used, the messages are written using
+ that facility. (This defaults to "local0" if not
+ specified.)
+ Anything else Interpreted as the name of a file to which output
+ is appended. If the file does not exist, a new one
+ is opened.
+
+In the case of "stdout", "stderr" and "syslog", they must be written exactly
+as is - no leading or trailing spaces, and in lower-case.
+
+Python
+------
+To be supplied
+
Severity Guidelines
===================
diff --git a/src/lib/log/compiler/message.cc b/src/lib/log/compiler/message.cc
index 68335dc..f74020a 100644
--- a/src/lib/log/compiler/message.cc
+++ b/src/lib/log/compiler/message.cc
@@ -43,6 +43,7 @@ using namespace isc::util;
static const char* VERSION = "1.0-0";
+/// \file log/compiler/message.cc
/// \brief Message Compiler
///
/// \b Overview<BR>
@@ -55,13 +56,16 @@ static const char* VERSION = "1.0-0";
/// \b Invocation<BR>
/// The program is invoked with the command:
///
-/// <tt>message [-v | -h | \<message-file\>]</tt>
+/// <tt>message [-v | -h | -p | -d \<dir\> | \<message-file\>]</tt>
///
-/// It reads the message file and writes out two files of the same name in the
-/// default directory but with extensions of .h and .cc.
+/// It reads the message file and writes out two files of the same
+/// name in the current working directory (unless -d is used) but
+/// with extensions of .h and .cc, or .py if -p is used.
///
-/// \-v causes it to print the version number and exit. \-h prints a help
-/// message (and exits).
+/// -v causes it to print the version number and exit. -h prints a help
+/// message (and exits). -p sets the output to python. -d \<dir\> will make
+/// it write the output file(s) to dir instead of the current working
+/// directory.
/// \brief Print Version
@@ -80,11 +84,12 @@ version() {
void
usage() {
cout <<
- "Usage: message [-h] [-v] [-p] <message-file>\n" <<
+ "Usage: message [-h] [-v] [-p] [-d dir] <message-file>\n" <<
"\n" <<
"-h Print this message and exit\n" <<
"-v Print the program version and exit\n" <<
"-p Output python source instead of C++ ones\n" <<
+ "-d <dir> Place output files in given directory\n" <<
"\n" <<
"<message-file> is the name of the input message file.\n";
}
@@ -106,7 +111,7 @@ currentTime() {
// Convert to string and strip out the trailing newline
string current_time = buffer;
- return isc::util::str::trim(current_time);
+ return (isc::util::str::trim(current_time));
}
@@ -127,7 +132,7 @@ sentinel(Filename& file) {
string ext = file.extension();
string sentinel_text = "__" + name + "_" + ext.substr(1);
isc::util::str::uppercase(sentinel_text);
- return sentinel_text;
+ return (sentinel_text);
}
@@ -154,7 +159,7 @@ quoteString(const string& instring) {
outstring += instring[i];
}
- return outstring;
+ return (outstring);
}
@@ -177,7 +182,7 @@ sortedIdentifiers(MessageDictionary& dictionary) {
}
sort(ident.begin(), ident.end());
- return ident;
+ return (ident);
}
@@ -207,7 +212,7 @@ splitNamespace(string ns) {
// ... and return the vector of namespace components split on the single
// colon.
- return isc::util::str::tokens(ns, ":");
+ return (isc::util::str::tokens(ns, ":"));
}
@@ -249,14 +254,22 @@ writeClosingNamespace(ostream& output, const vector<string>& ns) {
/// \param file Name of the message file. The source code is written to a file
/// file of the same name but with a .py suffix.
/// \param dictionary The dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
///
/// \note We don't use the namespace as in C++. We don't need it, because
/// python file/module works as implicit namespace as well.
void
-writePythonFile(const string& file, MessageDictionary& dictionary) {
+writePythonFile(const string& file, MessageDictionary& dictionary,
+ const char* output_directory)
+{
Filename message_file(file);
Filename python_file(Filename(message_file.name()).useAsDefault(".py"));
+ if (output_directory != NULL) {
+ python_file.setDirectory(output_directory);
+ }
// Open the file for writing
ofstream pyfile(python_file.fullName().c_str());
@@ -291,13 +304,19 @@ writePythonFile(const string& file, MessageDictionary& dictionary) {
/// \param ns Namespace in which the definitions are to be placed. An empty
/// string indicates no namespace.
/// \param dictionary Dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
void
writeHeaderFile(const string& file, const vector<string>& ns_components,
- MessageDictionary& dictionary)
+ MessageDictionary& dictionary, const char* output_directory)
{
Filename message_file(file);
Filename header_file(Filename(message_file.name()).useAsDefault(".h"));
+ if (output_directory != NULL) {
+ header_file.setDirectory(output_directory);
+ }
// Text to use as the sentinels.
string sentinel_text = sentinel(header_file);
@@ -382,13 +401,25 @@ replaceNonAlphaNum(char c) {
/// optimisation is done at link-time, not compiler-time. In this it _may_
/// decide to remove the initializer object because of a lack of references
/// to it. But until BIND-10 is ported to Windows, we won't know.
-
+///
+/// \param file Name of the message file. The header file is written to a
+/// file of the same name but with a .h suffix.
+/// \param ns Namespace in which the definitions are to be placed. An empty
+/// string indicates no namespace.
+/// \param dictionary Dictionary holding the message definitions.
+/// \param output_directory if not NULL, output files are written
+/// to the given directory. If NULL, they are written to the current
+/// working directory.
void
writeProgramFile(const string& file, const vector<string>& ns_components,
- MessageDictionary& dictionary)
+ MessageDictionary& dictionary,
+ const char* output_directory)
{
Filename message_file(file);
Filename program_file(Filename(message_file.name()).useAsDefault(".cc"));
+ if (output_directory) {
+ program_file.setDirectory(output_directory);
+ }
// Open the output file for writing
ofstream ccfile(program_file.fullName().c_str());
@@ -496,30 +527,35 @@ warnDuplicates(MessageReader& reader) {
int
main(int argc, char* argv[]) {
- const char* soptions = "hvp"; // Short options
+ const char* soptions = "hvpd:"; // Short options
optind = 1; // Ensure we start a new scan
int opt; // Value of the option
bool doPython = false;
+ const char *output_directory = NULL;
while ((opt = getopt(argc, argv, soptions)) != -1) {
switch (opt) {
+ case 'd':
+ output_directory = optarg;
+ break;
+
case 'p':
doPython = true;
break;
case 'h':
usage();
- return 0;
+ return (0);
case 'v':
version();
- return 0;
+ return (0);
default:
// A message will have already been output about the error.
- return 1;
+ return (1);
}
}
@@ -527,11 +563,11 @@ main(int argc, char* argv[]) {
if (optind < (argc - 1)) {
cout << "Error: excess arguments in command line\n";
usage();
- return 1;
+ return (1);
} else if (optind >= argc) {
cout << "Error: missing message file\n";
usage();
- return 1;
+ return (1);
}
string message_file = argv[optind];
@@ -552,7 +588,7 @@ main(int argc, char* argv[]) {
}
// Write the whole python file
- writePythonFile(message_file, dictionary);
+ writePythonFile(message_file, dictionary, output_directory);
} else {
// Get the namespace into which the message definitions will be put and
// split it into components.
@@ -560,16 +596,18 @@ main(int argc, char* argv[]) {
splitNamespace(reader.getNamespace());
// Write the header file.
- writeHeaderFile(message_file, ns_components, dictionary);
+ writeHeaderFile(message_file, ns_components, dictionary,
+ output_directory);
// Write the file that defines the message symbols and text
- writeProgramFile(message_file, ns_components, dictionary);
+ writeProgramFile(message_file, ns_components, dictionary,
+ output_directory);
}
// Finally, warn of any duplicates encountered.
warnDuplicates(reader);
}
- catch (MessageException& e) {
+ catch (const MessageException& e) {
// Create an error message from the ID and the text
MessageDictionary& global = MessageDictionary::globalDictionary();
string text = e.id();
@@ -583,9 +621,9 @@ main(int argc, char* argv[]) {
cerr << text << "\n";
- return 1;
+ return (1);
}
- return 0;
+ return (0);
}
diff --git a/src/lib/log/log_formatter.h b/src/lib/log/log_formatter.h
index c81d4ea..ca23844 100644
--- a/src/lib/log/log_formatter.h
+++ b/src/lib/log/log_formatter.h
@@ -18,12 +18,28 @@
#include <cstddef>
#include <string>
#include <iostream>
+
+#include <exceptions/exceptions.h>
#include <boost/lexical_cast.hpp>
#include <log/logger_level.h>
namespace isc {
namespace log {
+/// \brief Format Failure
+///
+/// This exception is used to wrap a bad_lexical_cast exception thrown during
+/// formatting an argument.
+
+class FormatFailure : public isc::Exception {
+public:
+ FormatFailure(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what)
+ {}
+};
+
+
+///
/// \brief The internal replacement routine
///
/// This is used internally by the Formatter. Replaces a placeholder
@@ -156,13 +172,29 @@ public:
/// \param arg The argument to place into the placeholder.
template<class Arg> Formatter& arg(const Arg& value) {
if (logger_) {
- return (arg(boost::lexical_cast<std::string>(value)));
+ try {
+ return (arg(boost::lexical_cast<std::string>(value)));
+ } catch (const boost::bad_lexical_cast& ex) {
+
+ // A bad_lexical_cast during a conversion to a string is
+ // *extremely* unlikely to fail. However, there is nothing
+ // in the documentation that rules it out, so we need to handle
+ // it. As it is a potentially very serious problem, throw the
+ // exception detailing the problem with as much information as
+ // we can. (Note that this does not include 'value' -
+ // boost::lexical_cast failed to convert it to a string, so an
+ // attempt to do so here would probably fail as well.)
+ isc_throw(FormatFailure, "bad_lexical_cast in call to "
+ "Formatter::arg(): " << ex.what());
+ }
} else {
return (*this);
}
}
/// \brief String version of arg.
+ ///
+ /// \param arg The text to place into the placeholder.
Formatter& arg(const std::string& arg) {
if (logger_) {
// Note that this method does a replacement and returns the
@@ -179,7 +211,6 @@ public:
}
return (*this);
}
-
};
}
diff --git a/src/lib/log/log_messages.cc b/src/lib/log/log_messages.cc
index a515959..f60898c 100644
--- a/src/lib/log/log_messages.cc
+++ b/src/lib/log/log_messages.cc
@@ -1,4 +1,4 @@
-// File created from log_messages.mes on Wed Jun 22 11:54:57 2011
+// File created from log_messages.mes on Thu Jul 7 15:32:06 2011
#include <cstddef>
#include <log/message_types.h>
diff --git a/src/lib/log/log_messages.h b/src/lib/log/log_messages.h
index 476f686..10e1501 100644
--- a/src/lib/log/log_messages.h
+++ b/src/lib/log/log_messages.h
@@ -1,4 +1,4 @@
-// File created from log_messages.mes on Wed Jun 22 11:54:57 2011
+// File created from log_messages.mes on Thu Jul 7 15:32:06 2011
#ifndef __LOG_MESSAGES_H
#define __LOG_MESSAGES_H
diff --git a/src/lib/log/log_messages.mes b/src/lib/log/log_messages.mes
index 697ac92..f150f39 100644
--- a/src/lib/log/log_messages.mes
+++ b/src/lib/log/log_messages.mes
@@ -28,23 +28,23 @@ destination should be one of "console", "file", or "syslog".
% LOG_BAD_SEVERITY unrecognized log severity: %1
A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
% LOG_BAD_STREAM bad log console output stream: %1
-A log console output stream was given that was not recognized. The output
-stream should be one of "stdout", or "stderr"
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
% LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code
-During start-up, BIND10 detected that the given message identification had
-been defined multiple times in the BIND10 code.
-
-This has no ill-effects other than the possibility that an erronous
-message may be logged. However, as it is indicative of a programming
-error, please log a bug report.
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code. This indicates a
+programming error; please submit a bug report.
% LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found
When reading a message file, more than one $NAMESPACE directive was found.
-Such a condition is regarded as an error and the read will be abandoned.
+(This directive is used to set a C++ namespace when generating header
+files during software development.) Such a condition is regarded as an
+error and the read will be abandoned.
% LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2
The program was not able to open the specified input message file for
@@ -99,10 +99,10 @@ There may be several reasons why this message may appear:
- The program outputting the message may not use that particular message
(e.g. it originates in a module not used by the program.)
-- The local file was written for an earlier version of the BIND10 software
+- The local file was written for an earlier version of the BIND 10 software
and the later version no longer generates that message.
-Whatever the reason, there is no impact on the operation of BIND10.
+Whatever the reason, there is no impact on the operation of BIND 10.
% LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2
Originating within the logging code, the program was not able to open
@@ -115,7 +115,7 @@ This error is generated when the compiler finds a $PREFIX directive with
more than one argument.
Note: the $PREFIX directive is deprecated and will be removed in a future
-version of BIND10.
+version of BIND 10.
% LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')
Within a message file, the $PREFIX directive takes a single argument,
@@ -123,13 +123,13 @@ a prefix to be added to the symbol names when a C++ file is created.
As such, it must adhere to restrictions on C++ symbol names (e.g. may
only contain alphanumeric characters or underscores, and may nor start
with a digit). A $PREFIX directive was found with an argument (given
-in the message) that violates those restictions.
+in the message) that violates those restrictions.
Note: the $PREFIX directive is deprecated and will be removed in a future
-version of BIND10.
+version of BIND 10.
% LOG_READING_LOCAL_FILE reading local message file %1
-This is an informational message output by BIND10 when it starts to read
+This is an informational message output by BIND 10 when it starts to read
a local message file. (A local message file may replace the text of
one of more messages; the ID of the message will not be changed though.)
diff --git a/src/lib/log/logger_support.cc b/src/lib/log/logger_support.cc
index 73323a0..2097136 100644
--- a/src/lib/log/logger_support.cc
+++ b/src/lib/log/logger_support.cc
@@ -12,26 +12,9 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE
-/// \brief Temporary Logger Support
-///
-/// Performs run-time initialization of the logger system. In particular, it
-/// is passed information from the command line and:
-///
-/// a) Sets the severity of the messages being logged (and debug level if
-/// appropriate).
-/// b) Reads in the local message file is one has been supplied.
-///
-/// These functions will be replaced once the code has been written to obtain
-/// the logging parameters from the configuration database.
-
-#include <iostream>
-#include <algorithm>
-#include <iostream>
#include <string>
-
-#include <log/logger_level.h>
-#include <log/logger_manager.h>
#include <log/logger_support.h>
+#include <log/logger_manager.h>
using namespace std;
@@ -67,60 +50,5 @@ initLogger(const string& root, isc::log::Severity severity, int dbglevel,
LoggerManager::init(root, severity, dbglevel, file);
}
-// Logger Run-Time Initialization via Environment Variables
-void initLogger(isc::log::Severity severity, int dbglevel) {
-
- // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
- // If not present, the name is "bind10".
- const char* DEFAULT_ROOT = "bind10";
- const char* root = getenv("B10_LOGGER_ROOT");
- if (! root) {
- root = DEFAULT_ROOT;
- }
-
- // Set the logging severity. The environment variable is
- // B10_LOGGER_SEVERITY, and can be one of "DEBUG", "INFO", "WARN", "ERROR"
- // of "FATAL". Note that the string must be in upper case with no leading
- // of trailing blanks.
- const char* sev_char = getenv("B10_LOGGER_SEVERITY");
- if (sev_char) {
- severity = isc::log::getSeverity(sev_char);
- }
-
- // If the severity is debug, get the debug level (environment variable
- // B10_LOGGER_DBGLEVEL), which should be in the range 0 to 99.
- if (severity == isc::log::DEBUG) {
- const char* dbg_char = getenv("B10_LOGGER_DBGLEVEL");
- if (dbg_char) {
- int level = 0;
- try {
- level = boost::lexical_cast<int>(dbg_char);
- if (level < MIN_DEBUG_LEVEL) {
- cerr << "**ERROR** debug level of " << level
- << " is invalid - a value of " << MIN_DEBUG_LEVEL
- << " will be used\n";
- level = MIN_DEBUG_LEVEL;
- } else if (level > MAX_DEBUG_LEVEL) {
- cerr << "**ERROR** debug level of " << level
- << " is invalid - a value of " << MAX_DEBUG_LEVEL
- << " will be used\n";
- level = MAX_DEBUG_LEVEL;
- }
- } catch (...) {
- // Error, but not fatal to the test
- cerr << "**ERROR** Unable to translate "
- "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
- }
- dbglevel = level;
- }
- }
-
- /// Set the local message file
- const char* localfile = getenv("B10_LOGGER_LOCALMSG");
-
- // Initialize logging
- initLogger(root, severity, dbglevel, localfile);
-}
-
} // namespace log
} // namespace isc
diff --git a/src/lib/log/logger_support.h b/src/lib/log/logger_support.h
index 4bc8acc..4ce3ced 100644
--- a/src/lib/log/logger_support.h
+++ b/src/lib/log/logger_support.h
@@ -19,6 +19,13 @@
#include <string>
#include <log/logger.h>
+#include <log/logger_unittest_support.h>
+
+/// \file
+/// \brief Logging initialization functions
+///
+/// Contains a set of functions relating to logging initialization that are
+/// used by the production code.
namespace isc {
namespace log {
@@ -33,17 +40,13 @@ namespace log {
/// \return true if logging has been initialized, false if not
bool isLoggingInitialized();
-/// \brief Set "logging initialized" flag
-///
-/// Sets the state of the "logging initialized" flag.
+/// \brief Set state of "logging initialized" flag
///
/// \param state State to set the flag to. (This is expected to be "true" - the
/// default - for all code apart from specific unit tests.)
void setLoggingInitialized(bool state = true);
-
-
-/// \brief Run-Time Initialization
+/// \brief Run-time initialization
///
/// Performs run-time initialization of the logger in particular supplying:
///
@@ -62,43 +65,7 @@ void initLogger(const std::string& root,
isc::log::Severity severity = isc::log::INFO,
int dbglevel = 0, const char* file = NULL);
-
-/// \brief Run-Time Initialization from Environment
-///
-/// Performs run-time initialization of the logger via the setting of
-/// environment variables. These are:
-///
-/// B10_LOGGER_ROOT
-/// Name of the root logger. If not given, the string "bind10" will be used.
-///
-/// B10_LOGGER_SEVERITY
-/// Severity of messages that will be logged. This must be one of the strings
-/// "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE". (Must be upper case
-/// and must not contain leading or trailing spaces.) If not specified (or if
-/// specified but incorrect), the default passed as argument to this function
-/// (currently INFO) will be used.
-///
-/// B10_LOGGER_DBGLEVEL
-/// Ignored if the level is not DEBUG, this should be a number between 0 and
-/// 99 indicating the logging severity. The default is 0. If outside these
-/// limits or if not a number, The value passed to this function (default
-/// of 0) is used.
-///
-/// B10_LOGGER_LOCALMSG
-/// If defined, the path specification of a file that contains message
-/// definitions replacing ones in the default dictionary.
-///
-/// Any errors in the settings cause messages to be output to stderr.
-///
-/// This function is aimed at test programs, allowing the default settings to
-/// be overridden by the tester. It is not intended for use in production
-/// code.
-
-void initLogger(isc::log::Severity severity = isc::log::INFO,
- int dbglevel = 0);
-
} // namespace log
} // namespace isc
-
#endif // __LOGGER_SUPPORT_H
diff --git a/src/lib/log/logger_unittest_support.cc b/src/lib/log/logger_unittest_support.cc
new file mode 100644
index 0000000..a0969be
--- /dev/null
+++ b/src/lib/log/logger_unittest_support.cc
@@ -0,0 +1,175 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <algorithm>
+#include <string>
+
+#include <log/logger_level.h>
+#include <log/logger_name.h>
+#include <log/logger_manager.h>
+#include <log/logger_specification.h>
+#include <log/logger_unittest_support.h>
+#include <log/logger_support.h>
+#include <log/output_option.h>
+
+using namespace std;
+
+namespace isc {
+namespace log {
+
+// Get the logging severity. This is defined by the environment variable
+// B10_LOGGER_SEVERITY, and can be one of "DEBUG", "INFO", "WARN", "ERROR"
+// or "FATAL". (Note that the string must be in upper case with no leading
+// or trailing blanks.) If not present, the default severity passed to the
+// function is returned.
+isc::log::Severity
+b10LoggerSeverity(isc::log::Severity defseverity) {
+ const char* sev_char = getenv("B10_LOGGER_SEVERITY");
+ if (sev_char) {
+ return (isc::log::getSeverity(sev_char));
+ }
+ return (defseverity);
+}
+
+// Get the debug level. This is defined by the environment variable
+// B10_LOGGER_DBGLEVEL. If not defined, a default value passed to the function
+// is returned.
+int
+b10LoggerDbglevel(int defdbglevel) {
+ const char* dbg_char = getenv("B10_LOGGER_DBGLEVEL");
+ if (dbg_char) {
+ int level = 0;
+ try {
+ level = boost::lexical_cast<int>(dbg_char);
+ if (level < MIN_DEBUG_LEVEL) {
+ std::cerr << "**ERROR** debug level of " << level
+ << " is invalid - a value of " << MIN_DEBUG_LEVEL
+ << " will be used\n";
+ level = MIN_DEBUG_LEVEL;
+ } else if (level > MAX_DEBUG_LEVEL) {
+ std::cerr << "**ERROR** debug level of " << level
+ << " is invalid - a value of " << MAX_DEBUG_LEVEL
+ << " will be used\n";
+ level = MAX_DEBUG_LEVEL;
+ }
+ } catch (...) {
+ // Error, but not fatal to the test
+ std::cerr << "**ERROR** Unable to translate "
+ "B10_LOGGER_DBGLEVEL - a value of 0 will be used\n";
+ }
+ return (level);
+ }
+
+ return (defdbglevel);
+}
+
+
+// Reset characteristics of the root logger to that set by the environment
+// variables B10_LOGGER_SEVERITY, B10_LOGGER_DBGLEVEL and B10_LOGGER_DESTINATION.
+
+void
+resetUnitTestRootLogger() {
+
+ using namespace isc::log;
+
+ // Constants: not declared static as this function is expected to be
+ // called once only
+ const string DEVNULL = "/dev/null";
+ const string STDOUT = "stdout";
+ const string STDERR = "stderr";
+ const string SYSLOG = "syslog";
+ const string SYSLOG_COLON = "syslog:";
+
+ // Get the destination. If not specified, assume /dev/null. (The default
+ // severity for unit tests is DEBUG, which generates a lot of output.
+ // Routing the logging to /dev/null will suppress that, whilst still
+ // ensuring that the code paths are tested.)
+ const char* destination = getenv("B10_LOGGER_DESTINATION");
+ const string dest((destination == NULL) ? DEVNULL : destination);
+
+ // Prepare the objects to define the logging specification
+ LoggerSpecification spec(getRootLoggerName(),
+ b10LoggerSeverity(isc::log::DEBUG),
+ b10LoggerDbglevel(isc::log::MAX_DEBUG_LEVEL));
+ OutputOption option;
+
+ // Set up output option according to destination specification
+ if (dest == STDOUT) {
+ option.destination = OutputOption::DEST_CONSOLE;
+ option.stream = OutputOption::STR_STDOUT;
+
+ } else if (dest == STDERR) {
+ option.destination = OutputOption::DEST_CONSOLE;
+ option.stream = OutputOption::STR_STDERR;
+
+ } else if (dest == SYSLOG) {
+ option.destination = OutputOption::DEST_SYSLOG;
+ // Use default specified in OutputOption constructor for the
+ // syslog destination
+
+ } else if (dest.find(SYSLOG_COLON) == 0) {
+ option.destination = OutputOption::DEST_SYSLOG;
+ // Must take account of the string actually being "syslog:"
+ if (dest == SYSLOG_COLON) {
+ cerr << "**ERROR** value for B10_LOGGER_DESTINATION of " <<
+ SYSLOG_COLON << " is invalid, " << SYSLOG <<
+ " will be used instead\n";
+ // Use default for logging facility
+
+ } else {
+ // Everything else in the string is the facility name
+ option.facility = dest.substr(SYSLOG_COLON.size());
+ }
+
+ } else {
+ // Not a recognised destination, assume a file.
+ option.destination = OutputOption::DEST_FILE;
+ option.filename = dest;
+ }
+
+ // ... and set the destination
+ spec.addOutputOption(option);
+ LoggerManager manager;
+ manager.process(spec);
+}
+
+
+// Logger Run-Time Initialization via Environment Variables
+void initLogger(isc::log::Severity severity, int dbglevel) {
+
+ // Root logger name is defined by the environment variable B10_LOGGER_ROOT.
+ // If not present, the name is "bind10".
+ const char* DEFAULT_ROOT = "bind10";
+ const char* root = getenv("B10_LOGGER_ROOT");
+ if (! root) {
+ root = DEFAULT_ROOT;
+ }
+
+ // Set the local message file
+ const char* localfile = getenv("B10_LOGGER_LOCALMSG");
+
+ // Initialize logging
+ initLogger(root, isc::log::DEBUG, isc::log::MAX_DEBUG_LEVEL, localfile);
+
+ // Now reset the output destination of the root logger, overriding
+ // the default severity, debug level and destination with those specified
+ // in the environment variables. (The two-step approach is used as the
+ // resetUnitTestRootLogger() function is used in several
+ // places in the BIND 10 tests, and it avoids duplicating code.)
+ resetUnitTestRootLogger();
+}
+
+} // namespace log
+} // namespace isc
diff --git a/src/lib/log/logger_unittest_support.h b/src/lib/log/logger_unittest_support.h
new file mode 100644
index 0000000..ce9121b
--- /dev/null
+++ b/src/lib/log/logger_unittest_support.h
@@ -0,0 +1,126 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOGGER_UNITTEST_SUPPORT_H
+#define __LOGGER_UNITTEST_SUPPORT_H
+
+#include <string>
+#include <log/logger.h>
+
+/// \file
+/// \brief Miscellaneous logging functions used by the unit tests.
+///
+/// As the configuration database is usually unavailable during unit tests,
+/// the functions defined here allow a limited amount of logging configuration
+/// through the use of environment variables
+
+namespace isc {
+namespace log {
+
+/// \brief Run-Time Initialization for Unit Tests from Environment
+///
+/// Performs run-time initialization of the logger via the setting of
+/// environment variables. These are:
+///
+/// - B10_LOGGER_ROOT\n
+/// Name of the root logger. If not given, the string "bind10" will be used.
+///
+/// - B10_LOGGER_SEVERITY\n
+/// Severity of messages that will be logged. This must be one of the strings
+/// "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE". (Must be upper case
+/// and must not contain leading or trailing spaces.) If not specified (or if
+/// specified but incorrect), the default passed as argument to this function
+/// (currently DEBUG) will be used.
+///
+/// - B10_LOGGER_DBGLEVEL\n
+/// Ignored if the level is not DEBUG, this should be a number between 0 and
+/// 99 indicating the logging severity. The default is 0. If outside these
+/// limits or if not a number, The value passed to this function (default
+/// of MAX_DEBUG_LEVEL) is used.
+///
+/// - B10_LOGGER_LOCALMSG\n
+/// If defined, the path specification of a file that contains message
+/// definitions replacing ones in the default dictionary.
+///
+/// - B10_LOGGER_DESTINATION\n
+/// If defined, the destination of the logging output. This can be one of:
+/// - \c stdout Send output to stdout.
+/// - \c stderr Send output to stderr
+/// - \c syslog Send output to syslog using the facility local0.
+/// - \c syslog:xxx Send output to syslog, using the facility xxx. ("xxx"
+/// should be one of the syslog facilities such as "local0".) There must
+/// be a colon between "syslog" and "xxx".
+/// - \c other Anything else is interpreted as the name of a file to which
+/// output is appended. If the file does not exist, it is created.
+///
+/// Any errors in the settings cause messages to be output to stderr.
+///
+/// This function is aimed at test programs, allowing the default settings to
+/// be overridden by the tester. It is not intended for use in production
+/// code.
+///
+/// TODO: Rename. This function overloads the initLogger() function that can
+/// be used to initialize production programs. This may lead to confusion.
+void initLogger(isc::log::Severity severity = isc::log::DEBUG,
+ int dbglevel = isc::log::MAX_DEBUG_LEVEL);
+
+
+/// \brief Obtains logging severity from B10_LOGGER_SEVERITY
+///
+/// Support function called by the unit test logging initialization code.
+/// It returns the logging severity defined by B10_LOGGER_SEVERITY. If
+/// not defined it returns the default passed to it.
+///
+/// \param defseverity Default severity used if B10_LOGGER_SEVERITY is not
+/// defined.
+///
+/// \return Severity to use for the logging.
+isc::log::Severity b10LoggerSeverity(isc::log::Severity defseverity);
+
+
+/// \brief Obtains logging debug level from B10_LOGGER_DBGLEVEL
+///
+/// Support function called by the unit test logging initialization code.
+/// It returns the logging debug level defined by B10_LOGGER_DBGLEVEL. If
+/// not defined, it returns the default passed to it.
+///
+/// N.B. If there is an error, a message is written to stderr and a value
+/// related to the error is used. (This is because (a) logging is not yet
+/// initialized, hence only the error stream is known to exist, and (b) this
+/// function is only used in unit test logging initialization, so incorrect
+/// selection of a level is not really an issue.)
+///
+/// \param defdbglevel Default debug level to be used if B10_LOGGER_DBGLEVEL
+/// is not defined.
+///
+/// \return Debug level to use.
+int b10LoggerDbglevel(int defdbglevel);
+
+
+/// \brief Reset root logger characteristics
+///
+/// This is a simplified interface into the resetting of the characteristics
+/// of the root logger. It is aimed for use in unit tests and resets the
+/// characteristics of the root logger to use a severity, debug level and
+/// destination set by the environment variables B10_LOGGER_SEVERITY,
+/// B10_LOGGER_DBGLEVEL and B10_LOGGER_DESTINATION.
+void
+resetUnitTestRootLogger();
+
+} // namespace log
+} // namespace isc
+
+
+
+#endif // __LOGGER_UNITTEST_SUPPORT_H
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index cd2ae29..069a7b4 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -51,13 +51,26 @@ logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
logger_example_LDADD = $(top_builddir)/src/lib/log/liblog.la
logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
+logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+
+check_PROGRAMS += init_logger_test
+init_logger_test_SOURCES = init_logger_test.cc
+init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
+init_logger_test_LDADD = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
+init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
noinst_PROGRAMS = $(TESTS)
-# Additional test using the shell
-PYTESTS = console_test.sh local_file_test.sh severity_test.sh
+# Additional test using the shell. These are principally tests
+# where the global logging environment is affected, and where the
+# output needs to be compared with stored output (where "cut" and
+# "diff" are useful utilities).
+
check-local:
$(SHELL) $(abs_builddir)/console_test.sh
$(SHELL) $(abs_builddir)/destination_test.sh
+ $(SHELL) $(abs_builddir)/init_logger_test.sh
$(SHELL) $(abs_builddir)/local_file_test.sh
$(SHELL) $(abs_builddir)/severity_test.sh
diff --git a/src/lib/log/tests/console_test.sh.in b/src/lib/log/tests/console_test.sh.in
index 7ef2684..a16dc23 100755
--- a/src/lib/log/tests/console_test.sh.in
+++ b/src/lib/log/tests/console_test.sh.in
@@ -13,8 +13,6 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-# \brief
-#
# The logger supports the idea of a "console" logger than logs to either stdout
# or stderr. This test checks that both these options work.
diff --git a/src/lib/log/tests/destination_test.sh.in b/src/lib/log/tests/destination_test.sh.in
index 41a52ee..1cfb9fb 100755
--- a/src/lib/log/tests/destination_test.sh.in
+++ b/src/lib/log/tests/destination_test.sh.in
@@ -13,10 +13,7 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-# \brief Severity test
-#
-# Checks that the logger will limit the output of messages less severy than
-# the severity/debug setting.
+# Checks that the logger will route messages to the chosen destination.
testname="Destination test"
echo $testname
diff --git a/src/lib/log/tests/init_logger_test.cc b/src/lib/log/tests/init_logger_test.cc
new file mode 100644
index 0000000..104c078
--- /dev/null
+++ b/src/lib/log/tests/init_logger_test.cc
@@ -0,0 +1,42 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <log/macros.h>
+#include <log/logger_support.h>
+#include <log/log_messages.h>
+
+using namespace isc::log;
+
+/// \brief Test InitLogger
+///
+/// A program used in testing the logger that initializes logging using
+/// initLogger(), then outputs several messages at different severities and
+/// debug levels. An external script sets the environment variables and checks
+/// that they have the desired effect.
+
+int
+main(int, char**) {
+ initLogger();
+ Logger logger("log");
+
+ LOG_DEBUG(logger, 0, LOG_BAD_DESTINATION).arg("debug-0");
+ LOG_DEBUG(logger, 50, LOG_BAD_DESTINATION).arg("debug-50");
+ LOG_DEBUG(logger, 99, LOG_BAD_DESTINATION).arg("debug-99");
+ LOG_INFO(logger, LOG_BAD_SEVERITY).arg("info");
+ LOG_WARN(logger, LOG_BAD_STREAM).arg("warn");
+ LOG_ERROR(logger, LOG_DUPLICATE_MESSAGE_ID).arg("error");
+ LOG_FATAL(logger, LOG_NO_MESSAGE_ID).arg("fatal");
+
+ return (0);
+}
diff --git a/src/lib/log/tests/init_logger_test.sh.in b/src/lib/log/tests/init_logger_test.sh.in
new file mode 100755
index 0000000..795419b
--- /dev/null
+++ b/src/lib/log/tests/init_logger_test.sh.in
@@ -0,0 +1,110 @@
+#!/bin/sh
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks that the initLogger() call uses for unit tests respects the setting of
+# the environment variables.
+
+testname="initLogger test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/init_logger_test_tempfile_$$
+destfile=@abs_builddir@/init_logger_test_destfile_$$
+
+passfail() {
+ if [ $1 -eq 0 ]; then
+ echo " pass"
+ else
+ echo " FAIL"
+ failcount=`expr $failcount + $1`
+ fi
+}
+
+echo "1. Checking that B10_LOGGER_SEVERITY/B10_LOGGER_DBGLEVEL work"
+
+echo -n " - severity=DEBUG, dbglevel=99: "
+cat > $tempfile << .
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-0
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-50
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-99
+INFO [bind10.log] LOG_BAD_SEVERITY unrecognized log severity: info
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=DEBUG B10_LOGGER_DBGLEVEL=99 ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n " - severity=DEBUG, dbglevel=50: "
+cat > $tempfile << .
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-0
+DEBUG [bind10.log] LOG_BAD_DESTINATION unrecognized log destination: debug-50
+INFO [bind10.log] LOG_BAD_SEVERITY unrecognized log severity: info
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=DEBUG B10_LOGGER_DBGLEVEL=50 ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo -n " - severity=WARN: "
+cat > $tempfile << .
+WARN [bind10.log] LOG_BAD_STREAM bad log console output stream: warn
+ERROR [bind10.log] LOG_DUPLICATE_MESSAGE_ID duplicate message ID (error) in compiled code
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+B10_LOGGER_DESTINATION=stdout B10_LOGGER_SEVERITY=WARN ./init_logger_test | \
+ cut -d' ' -f3- | diff $tempfile -
+passfail $?
+
+echo "2. Checking that B10_LOGGER_DESTINATION works"
+
+echo -n " - stdout: "
+cat > $tempfile << .
+FATAL [bind10.log] LOG_NO_MESSAGE_ID line fatal: message definition line found without a message ID
+.
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=stdout ./init_logger_test 1> $destfile
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+echo -n " - stderr: "
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=stderr ./init_logger_test 2> $destfile
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+echo -n " - file: "
+rm -f $destfile
+B10_LOGGER_SEVERITY=FATAL B10_LOGGER_DESTINATION=$destfile ./init_logger_test
+cut -d' ' -f3- $destfile | diff $tempfile -
+passfail $?
+
+# Note: can't automatically test syslog output.
+
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Tidy up.
+rm -f $tempfile $destfile
+
+exit $failcount
diff --git a/src/lib/log/tests/local_file_test.sh.in b/src/lib/log/tests/local_file_test.sh.in
index d76f48f..9b898e6 100755
--- a/src/lib/log/tests/local_file_test.sh.in
+++ b/src/lib/log/tests/local_file_test.sh.in
@@ -13,8 +13,6 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-# \brief Local message file test
-#
# Checks that a local message file can override the definitions in the message
# dictionary.
diff --git a/src/lib/log/tests/logger_level_impl_unittest.cc b/src/lib/log/tests/logger_level_impl_unittest.cc
index 0ded7f9..dacd202 100644
--- a/src/lib/log/tests/logger_level_impl_unittest.cc
+++ b/src/lib/log/tests/logger_level_impl_unittest.cc
@@ -20,6 +20,7 @@
#include <boost/lexical_cast.hpp>
#include <log/logger_level_impl.h>
+#include <log/logger_support.h>
#include <log4cplus/logger.h>
using namespace isc::log;
@@ -27,8 +28,10 @@ using namespace std;
class LoggerLevelImplTest : public ::testing::Test {
protected:
- LoggerLevelImplTest()
- {}
+ LoggerLevelImplTest() {
+ // Ensure logging set to default for unit tests
+ resetUnitTestRootLogger();
+ }
~LoggerLevelImplTest()
{}
diff --git a/src/lib/log/tests/logger_level_unittest.cc b/src/lib/log/tests/logger_level_unittest.cc
index 8c98091..641a6cc 100644
--- a/src/lib/log/tests/logger_level_unittest.cc
+++ b/src/lib/log/tests/logger_level_unittest.cc
@@ -20,7 +20,7 @@
#include <log/logger.h>
#include <log/logger_manager.h>
#include <log/log_messages.h>
-#include <log/logger_name.h>
+#include <log/logger_support.h>
using namespace isc;
using namespace isc::log;
@@ -29,7 +29,9 @@ using namespace std;
class LoggerLevelTest : public ::testing::Test {
protected:
LoggerLevelTest() {
- // Logger initialization is done in main()
+ // Logger initialization is done in main(). As logging tests may
+ // alter the default logging output, it is reset here.
+ resetUnitTestRootLogger();
}
~LoggerLevelTest() {
LoggerManager::reset();
@@ -57,7 +59,7 @@ TEST_F(LoggerLevelTest, Creation) {
EXPECT_EQ(42, level3.dbglevel);
}
-TEST(LoggerLevel, getSeverity) {
+TEST_F(LoggerLevelTest, getSeverity) {
EXPECT_EQ(DEBUG, getSeverity("DEBUG"));
EXPECT_EQ(DEBUG, getSeverity("debug"));
EXPECT_EQ(DEBUG, getSeverity("DeBuG"));
diff --git a/src/lib/log/tests/logger_support_unittest.cc b/src/lib/log/tests/logger_support_unittest.cc
index 6a93652..b418906 100644
--- a/src/lib/log/tests/logger_support_unittest.cc
+++ b/src/lib/log/tests/logger_support_unittest.cc
@@ -18,12 +18,23 @@
using namespace isc::log;
+class LoggerSupportTest : public ::testing::Test {
+protected:
+ LoggerSupportTest() {
+ // Logger initialization is done in main(). As logging tests may
+ // alter the default logging output, it is reset here.
+ resetUnitTestRootLogger();
+ }
+ ~LoggerSupportTest() {
+ }
+};
+
// Check that the initialized flag can be manipulated. This is a bit chicken-
+// -and-egg: we want to reset the flag to the original value at the end
// of the test, so use the functions to do that. But we are trying to check
// that these functions in fact work.
-TEST(LoggerSupportTest, InitializedFlag) {
+TEST_F(LoggerSupportTest, InitializedFlag) {
bool current_flag = isLoggingInitialized();
// check we can flip the flag.
@@ -51,7 +62,7 @@ TEST(LoggerSupportTest, InitializedFlag) {
// Check that a logger will throw an exception if logging has not been
// initialized.
-TEST(LoggerSupportTest, LoggingInitializationCheck) {
+TEST_F(LoggerSupportTest, LoggingInitializationCheck) {
// Assert that logging has been initialized (it should be in main()).
bool current_flag = isLoggingInitialized();
diff --git a/src/lib/log/tests/severity_test.sh.in b/src/lib/log/tests/severity_test.sh.in
index 124f36a..78d5050 100755
--- a/src/lib/log/tests/severity_test.sh.in
+++ b/src/lib/log/tests/severity_test.sh.in
@@ -13,9 +13,7 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-# \brief Severity test
-#
-# Checks that the logger will limit the output of messages less severy than
+# Checks that the logger will limit the output of messages less severe than
# the severity/debug setting.
testname="Severity test"
@@ -33,7 +31,7 @@ passfail() {
fi
}
-echo -n "1. runInitTest default parameters:"
+echo -n "1. Default parameters:"
cat > $tempfile << .
FATAL [example] LOG_WRITE_ERROR error writing to test1: 42
ERROR [example] LOG_READING_LOCAL_FILE reading local message file dummy/file
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index bfc5a91..a3e74c5 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,4 +1,5 @@
-SUBDIRS = datasrc cc config log net notify util testutils
+SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
+SUBDIRS += xfrin log_messages
python_PYTHON = __init__.py
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 8fcbf42..029f110 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,4 +1,7 @@
-import isc.datasrc
+# On some systems, it appears the dynamic linker gets
+# confused if the order is not right here
+# There is probably a solution for this, but for now:
+# order is important here!
import isc.cc
import isc.config
-#import isc.dns
+import isc.datasrc
diff --git a/src/lib/python/isc/acl/Makefile.am b/src/lib/python/isc/acl/Makefile.am
new file mode 100644
index 0000000..b1afa15
--- /dev/null
+++ b/src/lib/python/isc/acl/Makefile.am
@@ -0,0 +1,45 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+python_PYTHON = __init__.py dns.py
+pythondir = $(PYTHON_SITEPKG_DIR)/isc/acl
+
+pyexec_LTLIBRARIES = acl.la _dns.la
+pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/acl
+
+acl_la_SOURCES = acl.cc
+acl_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+acl_la_LDFLAGS = $(PYTHON_LDFLAGS)
+acl_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+_dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
+_dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
+_dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
+_dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+_dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
+# Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
+# placed after -Wextra defined in AM_CXXFLAGS
+_dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+
+# Python prefers .so, while some OSes (specifically MacOS) use a different
+# suffix for dynamic objects. -module is necessary to work this around.
+acl_la_LDFLAGS += -module
+acl_la_LIBADD = $(top_builddir)/src/lib/acl/libacl.la
+acl_la_LIBADD += $(PYTHON_LIB)
+
+_dns_la_LDFLAGS += -module
+_dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
+_dns_la_LIBADD += $(PYTHON_LIB)
+
+EXTRA_DIST = acl.py _dns.py
+EXTRA_DIST += acl_inc.cc
+EXTRA_DIST += dnsacl_inc.cc dns_requestacl_inc.cc dns_requestcontext_inc.cc
+EXTRA_DIST += dns_requestloader_inc.cc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/acl/__init__.py b/src/lib/python/isc/acl/__init__.py
new file mode 100644
index 0000000..d9b2838
--- /dev/null
+++ b/src/lib/python/isc/acl/__init__.py
@@ -0,0 +1,11 @@
+"""
+Here are function and classes for manipulating access control lists.
+"""
+
+# The DNS ACL loader would need the json module. Make sure it's imported
+# beforehand.
+import json
+
+# Other ACL modules highly depend on the main acl sub module, so it's
+# explicitly imported here.
+import isc.acl.acl
diff --git a/src/lib/python/isc/acl/_dns.py b/src/lib/python/isc/acl/_dns.py
new file mode 100644
index 0000000..a645a7b
--- /dev/null
+++ b/src/lib/python/isc/acl/_dns.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; The .so version will be installed into the right
+# place at installation time.
+# This helper script is only to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ bindingdir = os.path.join(base, 'isc/acl/.libs')
+ if os.path.exists(bindingdir):
+ sys.path.insert(0, bindingdir)
+
+from _dns import *
diff --git a/src/lib/python/isc/acl/acl.cc b/src/lib/python/isc/acl/acl.cc
new file mode 100644
index 0000000..6517a12
--- /dev/null
+++ b/src/lib/python/isc/acl/acl.cc
@@ -0,0 +1,80 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <acl/acl.h>
+
+using namespace isc::util::python;
+
+#include "acl_inc.cc"
+
+namespace {
+// Commonly used Python exception objects. Right now the acl module consists
+// of only one .cc file, so we hide them in an unnamed namespace. If and when
+// we extend this module with multiple .cc files, we should move them to
+// a named namespace, say isc::acl::python, and declare them in a separate
+// header file.
+PyObject* po_ACLError;
+PyObject* po_LoaderError;
+}
+
+namespace {
+PyModuleDef acl = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "isc.acl.acl",
+ acl_doc,
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+PyMODINIT_FUNC
+PyInit_acl(void) {
+ PyObject* mod = PyModule_Create(&acl);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ try {
+ po_ACLError = PyErr_NewException("isc.acl.Error", NULL, NULL);
+ PyObjectContainer(po_ACLError).installToModule(mod, "Error");
+
+ po_LoaderError = PyErr_NewException("isc.acl.LoaderError", NULL, NULL);
+ PyObjectContainer(po_LoaderError).installToModule(mod, "LoaderError");
+
+ // Install module constants. Note that we can let Py_BuildValue
+ // "steal" the references to these object (by specifying false to
+ // installToModule), because, unlike the exception cases above,
+ // we don't have corresponding C++ variables (see the note in
+ // pycppwrapper_util for more details).
+ PyObjectContainer(Py_BuildValue("I", isc::acl::ACCEPT)).
+ installToModule(mod, "ACCEPT", false);
+ PyObjectContainer(Py_BuildValue("I", isc::acl::REJECT)).
+ installToModule(mod, "REJECT", false);
+ PyObjectContainer(Py_BuildValue("I", isc::acl::DROP)).
+ installToModule(mod, "DROP", false);
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/acl/acl.py b/src/lib/python/isc/acl/acl.py
new file mode 100644
index 0000000..804d78b
--- /dev/null
+++ b/src/lib/python/isc/acl/acl.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; The .so version will be installed into the right
+# place at installation time.
+# This helper script is only to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+ bindingdir = os.path.join(base, 'isc/acl/.libs')
+ if os.path.exists(bindingdir):
+ sys.path.insert(0, bindingdir)
+
+from acl import *
diff --git a/src/lib/python/isc/acl/acl_inc.cc b/src/lib/python/isc/acl/acl_inc.cc
new file mode 100644
index 0000000..a9f7c9d
--- /dev/null
+++ b/src/lib/python/isc/acl/acl_inc.cc
@@ -0,0 +1,16 @@
+namespace {
+const char* const acl_doc = "\
+Implementation module for ACL operations\n\n\
+This module provides Python bindings for the C++ classes in the\n\
+isc::acl namespace.\n\
+\n\
+Integer constants:\n\
+\n\
+ACCEPT, REJECT, DROP -- Default actions an ACL could perform.\n\
+ These are the commonly used actions in specific ACLs.\n\
+ It is possible to specify any other values, as the ACL class does\n\
+ nothing about them, but these look reasonable, so they are provided\n\
+ for convenience. It is not specified what exactly these mean and it's\n\
+ up to whoever uses them.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns.cc b/src/lib/python/isc/acl/dns.cc
new file mode 100644
index 0000000..eb3b57b
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <stdexcept>
+#include <boost/shared_ptr.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <cc/data.h>
+
+#include <acl/acl.h>
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestcontext_python.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestloader_python.h"
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::util::python;
+using namespace isc::data;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+#include "dnsacl_inc.cc"
+
+namespace {
+// This is a Python binding object corresponding to the singleton loader used
+// in the C++ version of the library.
+// We can define it as a pure object rather than through an accessor function,
+// because in Python we can ensure it has been created and initialized
+// in the module initializer by the time it's actually used.
+s_RequestLoader* po_REQUEST_LOADER;
+
+PyMethodDef methods[] = {
+ { NULL, NULL, 0, NULL }
+};
+
+PyModuleDef dnsacl = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "isc.acl._dns",
+ dnsacl_doc,
+ -1,
+ methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+PyObject*
+getACLException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* acl_module = PyImport_AddModule("isc.acl.acl");
+ if (acl_module != NULL) {
+ PyObject* acl_dict = PyModule_GetDict(acl_module);
+ if (acl_dict != NULL) {
+ ex_obj = PyDict_GetItemString(acl_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+}
+}
+}
+}
+
+PyMODINIT_FUNC
+PyInit__dns(void) {
+ PyObject* mod = PyModule_Create(&dnsacl);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_RequestContext(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+ if (!initModulePart_RequestACL(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+ if (!initModulePart_RequestLoader(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ // Module constants
+ try {
+ if (po_REQUEST_LOADER == NULL) {
+ po_REQUEST_LOADER = static_cast<s_RequestLoader*>(
+ requestloader_type.tp_alloc(&requestloader_type, 0));
+ }
+ if (po_REQUEST_LOADER != NULL) {
+ // We gain and keep our own reference to the singleton object
+ // for the same reason as that for exception objects (see comments
+ // in pycppwrapper_util for more details). Note also that we don't
+ // bother to release the reference even if exception is thrown
+ // below (in fact, we cannot delete the singleton loader).
+ po_REQUEST_LOADER->cppobj = &getRequestLoader();
+ Py_INCREF(po_REQUEST_LOADER);
+ }
+ PyObjectContainer(po_REQUEST_LOADER).installToModule(mod,
+ "REQUEST_LOADER");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/acl/dns.h b/src/lib/python/isc/acl/dns.h
new file mode 100644
index 0000000..76849c5
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.h
@@ -0,0 +1,52 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_ACL_DNS_H
+#define __PYTHON_ACL_DNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.acl.acl loadable module.
+//
+// Since the acl module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access to
+// C/C++ symbols defined in that module. So we get access to these object
+// using the Python interpretor through this wrapper function.
+//
+// The __init__.py file should ensure isc.acl.acl has been loaded by the
+// time this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getACLException(const char* ex_name);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __PYTHON_ACL_DNS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns.py b/src/lib/python/isc/acl/dns.py
new file mode 100644
index 0000000..0733bc3
--- /dev/null
+++ b/src/lib/python/isc/acl/dns.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""\
+This module provides Python bindings for the C++ classes in the
+isc::acl::dns namespace. Specifically, it defines Python interfaces of
+handling access control lists (ACLs) with DNS related contexts.
+The actual binding is implemented in an effectively hidden module,
+isc.acl._dns; this frontend module is in terms of implementation so that
+the C++ binding code doesn't have to deal with complicated operations
+that could be done in a more straightforward way in native Python.
+
+For further details of the actual module, see the documentation of the
+_dns module.
+"""
+
+import pydnspp
+
+import isc.acl._dns
+from isc.acl._dns import *
+
+class RequestACL(isc.acl._dns.RequestACL):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestACL.
+
+    See the base class documentation for more details.
+ """
+ pass
+
+class RequestLoader(isc.acl._dns.RequestLoader):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestLoader.
+
+    See the base class documentation for more details.
+ """
+ pass
+
+class RequestContext(isc.acl._dns.RequestContext):
+ """A straightforward wrapper subclass of isc.acl._dns.RequestContext.
+
+    See the base class documentation for more details.
+ """
+
+ def __init__(self, remote_address, tsig=None):
+ """Wrapper for the RequestContext constructor.
+
+ Internal implementation details that the users don't have to
+ worry about: To avoid dealing with pydnspp bindings in the C++ code,
+ this wrapper converts the TSIG record in its wire format in the form
+ of byte data, and has the binding re-construct the record from it.
+ """
+ tsig_wire = b''
+ if tsig is not None:
+ if not isinstance(tsig, pydnspp.TSIGRecord):
+ raise TypeError("tsig must be a TSIGRecord, not %s" %
+ tsig.__class__.__name__)
+ tsig_wire = tsig.to_wire(tsig_wire)
+ isc.acl._dns.RequestContext.__init__(self, remote_address, tsig_wire)
+
+ def __str__(self):
+ """Wrap __str__() to convert the module name."""
+ s = isc.acl._dns.RequestContext.__str__(self)
+ return s.replace('<isc.acl._dns', '<isc.acl.dns')
diff --git a/src/lib/python/isc/acl/dns_requestacl_inc.cc b/src/lib/python/isc/acl/dns_requestacl_inc.cc
new file mode 100644
index 0000000..673fa23
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_inc.cc
@@ -0,0 +1,33 @@
+namespace {
+const char* const RequestACL_doc = "\
+The DNS Request ACL.\n\
+\n\
+It holds a bunch of ordered entries, each one consisting of a check for\n\
+a given DNS Request context and an action, which is one of ACCEPT,\n\
+REJECT, or DROP, as defined in the isc.acl.acl module.\n\
+The checks are tested in order and the first match counts.\n\
+\n\
+A RequestACL object cannot be constructed directly; an application\n\
+must use isc.acl.dns.load_request_acl() to create a RequestACL object.\n\
+\n\
+";
+
+const char* const RequestACL_execute_doc = "\
+execute(context) -> action \n\
+\n\
+The returned action is one of ACCEPT, REJECT or DROP as defined in\n\
+the isc.acl.acl module.\n\
+\n\
+This is the function that takes the ACL entries one by one, checks the\n\
+context against conditions and if it matches, returns the action that\n\
+belongs to the first matched entry or default action if nothing\n\
+matches.\n\
+\n\
+Parameters:\n\
+ context The thing that should be checked. It is directly passed\n\
+ to the checks.\n\
+\n\
+Return Value(s): The action for the ACL entry that first matches the\n\
+context.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.cc b/src/lib/python/isc/acl/dns_requestacl_python.cc
new file mode 100644
index 0000000..1c38a30
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_python.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <acl/acl.h>
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestcontext_python.h"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::acl;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, a helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestACL
+//
+
+// Trivial constructor.
+s_RequestACL::s_RequestACL() {}
+
+// Import pydoc text
+#include "dns_requestacl_inc.cc"
+
+namespace {
+int
+RequestACL_init(PyObject*, PyObject*, PyObject*) {
+ PyErr_SetString(getACLException("Error"),
+ "RequestACL cannot be directly constructed");
+ return (-1);
+}
+
+void
+RequestACL_destroy(PyObject* po_self) {
+ s_RequestACL* const self = static_cast<s_RequestACL*>(po_self);
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+RequestACL_execute(PyObject* po_self, PyObject* args) {
+ s_RequestACL* const self = static_cast<s_RequestACL*>(po_self);
+
+ try {
+ const s_RequestContext* po_context;
+ if (PyArg_ParseTuple(args, "O!", &requestcontext_type, &po_context)) {
+ const BasicAction action =
+ self->cppobj->execute(*po_context->cppobj);
+ return (Py_BuildValue("I", action));
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to execute ACL: " + string(ex.what());
+ PyErr_SetString(getACLException("Error"), ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in executing ACL");
+ }
+
+ return (NULL);
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestACL_methods[] = {
+ { "execute", RequestACL_execute, METH_VARARGS, RequestACL_execute_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RequestACL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject requestacl_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestACL",
+ sizeof(s_RequestACL), // tp_basicsize
+ 0, // tp_itemsize
+ RequestACL_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestACL_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestACL_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestACL_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestACL(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestacl_type) < 0) {
+ return (false);
+ }
+ void* p = &requestacl_type;
+ if (PyModule_AddObject(mod, "RequestACL", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&requestacl_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.h b/src/lib/python/isc/acl/dns_requestacl_python.h
new file mode 100644
index 0000000..8f7ad8a
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestacl_python.h
@@ -0,0 +1,53 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTACL_H
+#define __PYTHON_REQUESTACL_H 1
+
+#include <Python.h>
+
+#include <boost/shared_ptr.hpp>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestACL : public PyObject {
+public:
+ s_RequestACL();
+
+ // We don't have to use a shared pointer for its original purposes as
+ // the python object maintains reference counters itself. But the
+ // underlying C++ API only exposes a shared pointer for the ACL objects,
+ // so we store it in that form.
+ boost::shared_ptr<RequestACL> cppobj;
+};
+
+extern PyTypeObject requestacl_type;
+
+bool initModulePart_RequestACL(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTACL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns_requestcontext_inc.cc b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
new file mode 100644
index 0000000..f71bc59
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
@@ -0,0 +1,33 @@
+namespace {
+const char* const RequestContext_doc = "\
+DNS request to be checked.\n\
+\n\
+This plays the role of ACL context for the RequestACL object.\n\
+\n\
+Based on the minimalist philosophy, the initial implementation only\n\
+maintains the remote (source) IP address of the request and\n\
+(optionally) the TSIG record included in the request. We may add more\n\
+parameters of the request as we see the need for them. Possible\n\
+additional parameters are the local (destination) IP address, the\n\
+remote and local port numbers, various fields of the DNS request (e.g.\n\
+a particular header flag value).\n\
+\n\
+RequestContext(remote_address, tsig)\n\
+\n\
+ In this initial implementation, the constructor only takes a\n\
+ remote IP address in the form of a socket address as used in the\n\
+ Python socket module, and optionally a pydnspp.TSIGRecord object.\n\
+\n\
+ Exceptions:\n\
+ isc.acl.ACLError Normally shouldn't happen, but still possible\n\
+ for unexpected errors such as memory allocation\n\
+ failure or an invalid address text being passed.\n\
+\n\
+ Parameters:\n\
+ remote_address The remote IP address\n\
+ tsig The TSIG record included in the request message, if any.\n\
+ If the request doesn't include a TSIG, this will be None.\n\
+ If this parameter is omitted None will be assumed.\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.cc b/src/lib/python/isc/acl/dns_requestcontext_python.cc
new file mode 100644
index 0000000..7f33f59
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.cc
@@ -0,0 +1,382 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+#include <string.h>
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <sstream>
+#include <stdexcept>
+
+#include <boost/scoped_ptr.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+#include <dns/tsigrecord.h>
+
+#include <acl/dns.h>
+#include <acl/ip_check.h>
+
+#include "dns.h"
+#include "dns_requestcontext_python.h"
+
+using namespace std;
+using boost::scoped_ptr;
+using boost::lexical_cast;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::util::python;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+struct s_RequestContext::Data {
+ // The constructor.
+ Data(const char* const remote_addr, const unsigned short remote_port,
+ const char* tsig_data, const Py_ssize_t tsig_len)
+ {
+ createRemoteAddr(remote_addr, remote_port);
+ createTSIGRecord(tsig_data, tsig_len);
+ }
+
+ // A convenient type converter from sockaddr_storage to sockaddr
+ const struct sockaddr& getRemoteSockaddr() const {
+ const void* p = &remote_ss;
+ return (*static_cast<const struct sockaddr*>(p));
+ }
+
+ // The remote (source) IP address of the request. Note that it needs
+ // a reference to remote_ss. That's why the latter is stored within
+ // this structure.
+ scoped_ptr<IPAddress> remote_ipaddr;
+
+ // The effective length of remote_ss. It's necessary for getnameinfo()
+ // called from sockaddrToText (__str__ backend).
+ socklen_t remote_salen;
+
+ // The TSIG record included in the request, if any. If the request
+ // doesn't contain a TSIG, this will be NULL.
+ scoped_ptr<TSIGRecord> tsig_record;
+
+private:
+ // A helper method for the constructor that is responsible for constructing
+ // the remote address.
+ void createRemoteAddr(const char* const remote_addr,
+ const unsigned short remote_port)
+ {
+ struct addrinfo hints, *res;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_protocol = IPPROTO_UDP;
+ hints.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;
+ const int error(getaddrinfo(remote_addr,
+ lexical_cast<string>(remote_port).c_str(),
+ &hints, &res));
+ if (error != 0) {
+ isc_throw(InvalidParameter, "Failed to convert [" << remote_addr
+ << "]:" << remote_port << ", " << gai_strerror(error));
+ }
+ assert(sizeof(remote_ss) > res->ai_addrlen);
+ memcpy(&remote_ss, res->ai_addr, res->ai_addrlen);
+ remote_salen = res->ai_addrlen;
+ freeaddrinfo(res);
+
+ remote_ipaddr.reset(new IPAddress(getRemoteSockaddr()));
+ }
+
+ // A helper method for the constructor that is responsible for constructing
+ // the request TSIG.
+ void createTSIGRecord(const char* tsig_data, const Py_ssize_t tsig_len) {
+ if (tsig_len == 0) {
+ return;
+ }
+
+ // Re-construct the TSIG record from the passed binary. This should
+ // normally succeed because we are generally expected to be called
+ // from the frontend .py, which converts a valid TSIGRecord in its
+ // wire format. If some evil or buggy python program directly calls
+ // us with bogus data, validation in libdns++ will trigger an
+ // exception, which will be caught and converted to a Python exception
+ // in RequestContext_init().
+ isc::util::InputBuffer b(tsig_data, tsig_len);
+ const Name key_name(b);
+ const RRType tsig_type(b.readUint16());
+ const RRClass tsig_class(b.readUint16());
+ const RRTTL ttl(b.readUint32());
+ const size_t rdlen(b.readUint16());
+ const ConstRdataPtr rdata = createRdata(tsig_type, tsig_class, b,
+ rdlen);
+ tsig_record.reset(new TSIGRecord(key_name, tsig_class, ttl,
+ *rdata, 0));
+ }
+
+private:
+ struct sockaddr_storage remote_ss;
+};
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestContext
+//
+
+// Trivial constructor.
+s_RequestContext::s_RequestContext() : cppobj(NULL), data_(NULL) {
+}
+
+// Import pydoc text
+#include "dns_requestcontext_inc.cc"
+
+namespace {
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestContext_methods[] = {
+ { NULL, NULL, 0, NULL }
+};
+
+int
+RequestContext_init(PyObject* po_self, PyObject* args, PyObject*) {
+ s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
+
+ try {
+ // In this initial implementation, the constructor is simple: It
+ // takes two parameters. The first parameter should be a Python
+ // socket address object.
+ // For IPv4, it's ('address text', numeric_port); for IPv6,
+ // it's ('address text', num_port, num_flowid, num_zoneid).
+ // The second parameter is a wire-format TSIG record in the form of
+ // Python byte data. If the TSIG isn't included in the request,
+ // its length will be 0.
+ // Below, we parse the argument in the most straightforward way.
+ // As the constructor becomes more complicated, we should probably
+ // make it more structural (for example, we should first retrieve
+ // the python objects, and parse them recursively)
+
+ const char* remote_addr;
+ unsigned short remote_port;
+ unsigned int remote_flowinfo; // IPv6 only, unused here
+ unsigned int remote_zoneid; // IPv6 only, unused here
+ const char* tsig_data;
+ Py_ssize_t tsig_len;
+
+ if (PyArg_ParseTuple(args, "(sH)y#", &remote_addr, &remote_port,
+ &tsig_data, &tsig_len) ||
+ PyArg_ParseTuple(args, "(sHII)y#", &remote_addr, &remote_port,
+ &remote_flowinfo, &remote_zoneid,
+ &tsig_data, &tsig_len))
+ {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
+
+ auto_ptr<s_RequestContext::Data> dataptr(
+ new s_RequestContext::Data(remote_addr, remote_port,
+ tsig_data, tsig_len));
+ self->cppobj = new RequestContext(*dataptr->remote_ipaddr,
+ dataptr->tsig_record.get());
+ self->data_ = dataptr.release();
+ return (0);
+ }
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct RequestContext object: " +
+ string(ex.what());
+ PyErr_SetString(getACLException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing RequestContext");
+ return (-1);
+ }
+
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to RequestContext constructor");
+
+ return (-1);
+}
+
+void
+RequestContext_destroy(PyObject* po_self) {
+ s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
+
+ delete self->cppobj;
+ delete self->data_;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// A helper function for __str__()
+string
+sockaddrToText(const struct sockaddr& sa, socklen_t sa_len) {
+ char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
+ if (getnameinfo(&sa, sa_len, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf),
+ NI_NUMERICHOST | NI_NUMERICSERV)) {
+ // In this context this should never fail.
+ isc_throw(Unexpected, "Unexpected failure in getnameinfo");
+ }
+
+ return ("[" + string(hbuf) + "]:" + string(sbuf));
+}
+
+// for the __str__() method. This method is provided mainly for internal
+// testing.
+PyObject*
+RequestContext_str(PyObject* po_self) {
+ const s_RequestContext* const self =
+ static_cast<s_RequestContext*>(po_self);
+
+ try {
+ stringstream objss;
+ objss << "<" << requestcontext_type.tp_name << " object, "
+ << "remote_addr="
+ << sockaddrToText(self->data_->getRemoteSockaddr(),
+ self->data_->remote_salen);
+ if (self->data_->tsig_record) {
+ objss << ", key=" << self->data_->tsig_record->getName();
+ }
+ objss << ">";
+ return (Py_BuildValue("s", objss.str().c_str()));
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Failed to convert RequestContext object to text: " +
+ string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected failure in "
+ "converting RequestContext object to text");
+ }
+ return (NULL);
+}
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RequestContext
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject requestcontext_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestContext",
+ sizeof(s_RequestContext), // tp_basicsize
+ 0, // tp_itemsize
+ RequestContext_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ RequestContext_str, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestContext_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestContext_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestContext_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestContext(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestcontext_type) < 0) {
+ return (false);
+ }
+ void* p = &requestcontext_type;
+ if (PyModule_AddObject(mod, "RequestContext",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&requestcontext_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.h b/src/lib/python/isc/acl/dns_requestcontext_python.h
new file mode 100644
index 0000000..766133b
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.h
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTCONTEXT_H
+#define __PYTHON_REQUESTCONTEXT_H 1
+
+#include <Python.h>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestContext : public PyObject {
+public:
+ s_RequestContext();
+ RequestContext* cppobj;
+
+ // This object needs to maintain some source data to construct the
+ // underlying RequestContext object throughout its lifetime.
+ // These are "public" so that it can be accessed in the python wrapper
+ // implementation, but essentially they should be private, and the
+ // implementation details are hidden.
+ struct Data;
+ Data* data_;
+};
+
+extern PyTypeObject requestcontext_type;
+
+bool initModulePart_RequestContext(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTCONTEXT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dns_requestloader_inc.cc b/src/lib/python/isc/acl/dns_requestloader_inc.cc
new file mode 100644
index 0000000..a911275
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_inc.cc
@@ -0,0 +1,87 @@
+namespace {
+
+// Note: this is derived from the generic Loader class of the C++
+// implementation, but is slightly different from the original.
+// Be careful when you make further merge from the C++ document.
+const char* const RequestLoader_doc = "\
+Loader of DNS Request ACLs.\n\
+\n\
+The goal of this class is to convert JSON description of an ACL to\n\
+object of the ACL class (including the checks inside it).\n\
+\n\
+To allow any kind of checks to exist in the application, creators are\n\
+registered for the names of the checks (this feature is not yet\n\
+available for the python API).\n\
+\n\
+An ACL definition looks like this: [\n\
+ {\n\
+ \"action\": \"ACCEPT\",\n\
+ \"match-type\": <parameter>\n\
+ },\n\
+ {\n\
+ \"action\": \"REJECT\",\n\
+ \"match-type\": <parameter>,\n\
+ \"another-match-type\": [<parameter1>, <parameter2>]\n\
+ },\n\
+ {\n\
+ \"action\": \"DROP\"\n\
+ }\n\
+ ]\n\
+ \n\
+\n\
+This is a list of elements. Each element must have an \"action\"\n\
+entry/keyword. That one specifies which action is returned if this\n\
+element matches (the value of the key is passed to the action loader\n\
+(see the constructor)), which is one of ACCEPT,\n\
+REJECT, or DROP, as defined in the isc.acl.acl module.\n\
+\n\
+The rest of the element are matches. The left side is the name of the\n\
+match type (for example \"from\" to match for source IP address).\n\
+The <parameter> is whatever is needed to describe the\n\
+match and depends on the match type, the loader passes it verbatim to\n\
+creator of that match type.\n\
+\n\
+There may be multiple match types in single element. In such case, all\n\
+of the matches must match for the element to take action (so, in the\n\
+second element, both \"match-type\" and \"another-match-type\" must be\n\
+satisfied). If there's no match in the element, the action is\n\
+taken/returned without conditions, every time (makes sense as the last\n\
+entry, as the ACL will never get past it).\n\
+\n\
+The second entry shows another thing - if there's a list as the value\n\
+for some match and the match itself is not expecting a list, it is\n\
+taken as an \"or\" - a match for at least one of the choices in the\n\
+list must match. So, for the second entry, both \"match-type\" and\n\
+\"another-match-type\" must be satisfied, but the another one is\n\
+satisfied by either parameter1 or parameter2.\n\
+\n\
+Currently, a RequestLoader object cannot be constructed directly;\n\
+an application must use the singleton loader defined in the\n\
+isc.acl.dns module, i.e., isc.acl.dns.REQUEST_LOADER.\n\
+A future version of this implementation may be extended to give\n\
+applications full flexibility of creating arbitrary loader, when\n\
+this restriction may be removed.\n\
+";
+
+const char* const RequestLoader_load_doc = "\
+load(description) -> RequestACL\n\
+\n\
+Load a DNS (Request) ACL.\n\
+\n\
+This parses an ACL list, creates internal data for each rule\n\
+and returns a RequestACL object that contains all given rules.\n\
+\n\
+Exceptions:\n\
+ LoaderError Load failed. The most likely cause of this is a syntax\n\
+ error in the description. Other internal errors such as\n\
+ memory allocation failure are also converted to this\n\
+ exception.\n\
+\n\
+Parameters:\n\
+ description String or Python representation of the JSON list of\n\
+ ACL. The Python representation is the one accepted by the\n\
+ standard json module.\n\
+\n\
+Return Value(s): The newly created RequestACL object\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.cc b/src/lib/python/isc/acl/dns_requestloader_python.cc
new file mode 100644
index 0000000..ab421c5
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_python.cc
@@ -0,0 +1,270 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <string>
+#include <stdexcept>
+
+#include <boost/shared_ptr.hpp>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <cc/data.h>
+
+#include <acl/dns.h>
+
+#include "dns.h"
+#include "dns_requestacl_python.h"
+#include "dns_requestloader_python.h"
+
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::util::python;
+using namespace isc::data;
+using namespace isc::acl::dns;
+using namespace isc::acl::dns::python;
+
+//
+// Definition of the classes
+//
+
+// For each class, we need a struct, helper functions (init, destroy,
+// and static wrappers around the methods we export), a list of methods,
+// and a type description
+
+//
+// RequestLoader
+//
+
+// Trivial constructor.
+s_RequestLoader::s_RequestLoader() : cppobj(NULL) {
+}
+
+// Import pydoc text
+#include "dns_requestloader_inc.cc"
+
+namespace {
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+int
+RequestLoader_init(PyObject*, PyObject*, PyObject*) {
+ PyErr_SetString(getACLException("Error"),
+ "RequestLoader cannot be directly constructed");
+ return (-1);
+}
+
+void
+RequestLoader_destroy(PyObject* po_self) {
+ s_RequestLoader* const self = static_cast<s_RequestLoader*>(po_self);
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+// This C structure corresponds to a Python callable object for json.dumps().
+// This is initialized at the class initialization time (in
+// initModulePart_RequestLoader() below) and it's ensured to be non NULL and
+// valid in the rest of the class implementation.
+// Getting access to the json module this way and call one of its functions
+// via PyObject_CallObject() may exceed the reasonably acceptable level for
+// straightforward bindings. But the alternative would be to write a Python
+// frontend for the entire module only for this conversion, which would also
+// be too much. So, right now, we implement everything within the binding
+// implementation. If future extensions require more such non trivial
+// wrappers, we should consider the frontend approach more seriously.
+PyObject* json_dumps_obj = NULL;
+
+PyObject*
+RequestLoader_load(PyObject* po_self, PyObject* args) {
+ s_RequestLoader* const self = static_cast<s_RequestLoader*>(po_self);
+
+ try {
+ PyObjectContainer c1, c2; // placeholder for temporary py objects
+ const char* acl_config;
+
+ // First, try string
+ int py_result = PyArg_ParseTuple(args, "s", &acl_config);
+ if (!py_result) {
+ PyErr_Clear(); // need to clear the error from ParseTuple
+
+ // If that fails, confirm the argument is a single Python object,
+ // and pass the argument to json.dumps() without conversion.
+ // Note that we should pass 'args', not 'json_obj' to
+ // PyObject_CallObject(), since this function expects a form of
+ // tuple as its argument parameter, just like ParseTuple.
+ PyObject* json_obj;
+ if (PyArg_ParseTuple(args, "O", &json_obj)) {
+ c1.reset(PyObject_CallObject(json_dumps_obj, args));
+ c2.reset(Py_BuildValue("(O)", c1.get()));
+ py_result = PyArg_ParseTuple(c2.get(), "s", &acl_config);
+ }
+ }
+ if (py_result) {
+ shared_ptr<RequestACL> acl(
+ self->cppobj->load(Element::fromJSON(acl_config)));
+ s_RequestACL* py_acl = static_cast<s_RequestACL*>(
+ requestacl_type.tp_alloc(&requestacl_type, 0));
+ if (py_acl != NULL) {
+ py_acl->cppobj = acl;
+ }
+ return (py_acl);
+ }
+ } catch (const PyCPPWrapperException&) {
+ // If the wrapper utility throws, it's most likely because an invalid
+ // type of argument is passed (and the call to json.dumps() failed
+ // above), rather than a rare case of system errors such as memory
+ // allocation failure. So we fall through to the end of this function
+ // and raise a TypeError.
+ ;
+ } catch (const exception& ex) {
+ PyErr_SetString(getACLException("LoaderError"), ex.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError, "Unexpected C++ exception");
+ return (NULL);
+ }
+
+ PyErr_SetString(PyExc_TypeError, "RequestLoader.load() "
+ "expects str or python representation of JSON");
+ return (NULL);
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef RequestLoader_methods[] = {
+ { "load", RequestLoader_load, METH_VARARGS, RequestLoader_load_doc },
+ { NULL, NULL, 0, NULL }
+};
+} // end of unnamed namespace
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RequestLoader
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject requestloader_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "isc.acl._dns.RequestLoader",
+ sizeof(s_RequestLoader), // tp_basicsize
+ 0, // tp_itemsize
+ RequestLoader_destroy, // tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
+ RequestLoader_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ RequestLoader_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ RequestLoader_init, // tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+bool
+initModulePart_RequestLoader(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&requestloader_type) < 0) {
+ return (false);
+ }
+ void* p = &requestloader_type;
+ if (PyModule_AddObject(mod, "RequestLoader",
+ static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
+
+ // Get and hold our own reference to json.dumps() for later use.
+ // Normally it should succeed as __init__.py of the isc.acl package
+ // explicitly imports the json module, and the code below should be
+ // error free (e.g. they don't require memory allocation) under this
+ // condition.
+ // This could still fail with deviant or evil Python code such as those
+ // that first import json and then delete the reference to it from
+ // sys.modules before it imports the acl.dns module. The RequestLoader
+ // class could still work as long as it doesn't use the JSON decoder,
+ // but we'd rather refuse to import the module than allowing the partially
+ // workable class to keep running.
+ PyObject* json_module = PyImport_AddModule("json");
+ if (json_module != NULL) {
+ PyObject* json_dict = PyModule_GetDict(json_module);
+ if (json_dict != NULL) {
+ json_dumps_obj = PyDict_GetItemString(json_dict, "dumps");
+ }
+ }
+ if (json_dumps_obj != NULL) {
+ Py_INCREF(json_dumps_obj);
+ } else {
+ PyErr_SetString(PyExc_RuntimeError,
+ "isc.acl.dns.RequestLoader needs the json module, but "
+ "it's missing");
+ return (false);
+ }
+
+ Py_INCREF(&requestloader_type);
+
+ return (true);
+}
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.h b/src/lib/python/isc/acl/dns_requestloader_python.h
new file mode 100644
index 0000000..9d0b63e
--- /dev/null
+++ b/src/lib/python/isc/acl/dns_requestloader_python.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_REQUESTLOADER_H
+#define __PYTHON_REQUESTLOADER_H 1
+
+#include <Python.h>
+
+#include <acl/dns.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+namespace python {
+
+// The s_* Class simply covers one instantiation of the object
+class s_RequestLoader : public PyObject {
+public:
+ s_RequestLoader();
+ RequestLoader* cppobj;
+};
+
+extern PyTypeObject requestloader_type;
+
+bool initModulePart_RequestLoader(PyObject* mod);
+
+} // namespace python
+} // namespace dns
+} // namespace acl
+} // namespace isc
+#endif // __PYTHON_REQUESTLOADER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/acl/dnsacl_inc.cc b/src/lib/python/isc/acl/dnsacl_inc.cc
new file mode 100644
index 0000000..b2e7338
--- /dev/null
+++ b/src/lib/python/isc/acl/dnsacl_inc.cc
@@ -0,0 +1,17 @@
+namespace {
+const char* const dnsacl_doc = "\
+Implementation module for DNS ACL operations\n\n\
+This module provides Python bindings for the C++ classes in the\n\
+isc::acl::dns namespace. Specifically, it defines Python interfaces of\n\
+handling access control lists (ACLs) with DNS related contexts.\n\
+These bindings are a close match to the C++ API, but they are not complete\n\
+(some parts are not needed) and some are done in more python-like ways.\n\
+\n\
+Special objects:\n\
+\n\
+REQUEST_LOADER -- A singleton loader of ACLs. It is expected applications\n\
+ will use this function instead of creating their own loaders, because\n\
+ one is enough, this one will have registered default checks and it is\n\
+ known one, so any plugins can register additional checks as well.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
new file mode 100644
index 0000000..e0a1895
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -0,0 +1,30 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = acl_test.py dns_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/acl/tests/acl_test.py b/src/lib/python/isc/acl/tests/acl_test.py
new file mode 100644
index 0000000..24a0c94
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/acl_test.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+from isc.acl.acl import *
+
+class ACLTest(unittest.TestCase):
+
+ def test_actions(self):
+ # These are simple tests just checking the predefined actions have
+ # different values
+ self.assertTrue(ACCEPT != REJECT)
+ self.assertTrue(REJECT != DROP)
+ self.assertTrue(DROP != ACCEPT)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
new file mode 100644
index 0000000..7ee3023
--- /dev/null
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -0,0 +1,357 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import socket
+from pydnspp import *
+from isc.acl.acl import LoaderError, Error, ACCEPT, REJECT, DROP
+from isc.acl.dns import *
+
+def get_sockaddr(address, port):
+ '''This is a simple shortcut wrapper for getaddrinfo'''
+ ai = socket.getaddrinfo(address, port, 0, socket.SOCK_DGRAM,
+ socket.IPPROTO_UDP, socket.AI_NUMERICHOST)[0]
+ return ai[4]
+
+def get_acl(prefix):
+ '''This is a simple shortcut for creating an ACL containing single rule
+ that accepts addresses for the given IP prefix (and reject any others
+ by default)
+ '''
+ return REQUEST_LOADER.load('[{"action": "ACCEPT", "from": "' + \
+ prefix + '"}]')
+
+def get_acl_json(prefix):
+ '''Same as get_acl, but this function passes a Python representation of
+ JSON to the loader, not a string.'''
+ json = [{"action": "ACCEPT"}]
+ json[0]["from"] = prefix
+ return REQUEST_LOADER.load(json)
+
+# The following two are similar to the previous two, but use a TSIG key name
+# instead of IP prefix.
+def get_tsig_acl(key):
+ return REQUEST_LOADER.load('[{"action": "ACCEPT", "key": "' + \
+ key + '"}]')
+
+def get_tsig_acl_json(key):
+ json = [{"action": "ACCEPT"}]
+ json[0]["key"] = key
+ return REQUEST_LOADER.load(json)
+
+# commonly used TSIG RDATA. For the purpose of ACL checks only the key name
+# matters; other parameters are simply borrowed from some other tests, which
+# can be anything for the purpose of the tests here.
+TSIG_RDATA = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 " + \
+ "300 16 2tra2tra2tra2tra2tra2g== " + \
+ "11621 0 0")
+
+def get_context(address, key_name=None):
+ '''This is a simple shortcut wrapper for creating a RequestContext
+ object with a given IP address and optionally TSIG key name.
+ Port number doesn't matter in the test (as of the initial implementation),
+ so it's fixed for simplicity.
+ If key_name is not None, it internally creates a (faked) TSIG record
+ and constructs a context with that key. Note that only the key name
+ matters for the purpose of ACL checks.
+ '''
+ tsig_record = None
+ if key_name is not None:
+ tsig_record = TSIGRecord(Name(key_name), TSIG_RDATA)
+ return RequestContext(get_sockaddr(address, 53000), tsig_record)
+
+# These are commonly used RequestContext objects
+CONTEXT4 = get_context('192.0.2.1')
+CONTEXT6 = get_context('2001:db8::1')
+
+class RequestContextTest(unittest.TestCase):
+
+ def test_construct(self):
+ # Construct the context from IPv4/IPv6 addresses, check the object
+ # by printing it.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001>',
+ RequestContext(('192.0.2.1', 53001)).__str__())
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006>',
+ RequestContext(('2001:db8::1234', 53006,
+ 0, 0)).__str__())
+
+ # Construct the context from IP address and a TSIG record.
+ tsig_record = TSIGRecord(Name("key.example.com"), TSIG_RDATA)
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('192.0.2.1', 53001),
+ tsig_record).__str__())
+
+ # same with IPv6 address, just in case.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006, ' + \
+ 'key=key.example.com.>',
+ RequestContext(('2001:db8::1234', 53006,
+ 0, 0), tsig_record).__str__())
+
+ # Unusual case: port number overflows (this constructor allows that,
+ # although it should be rare anyway; the socket address should
+ # normally come from the Python socket module).
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:0>',
+ RequestContext(('192.0.2.1', 65536)).__str__())
+
+ # same test using socket.getaddrinfo() to ensure it accepts the sock
+ # address representation used in the Python socket module.
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[192.0.2.1]:53001>',
+ RequestContext(get_sockaddr('192.0.2.1',
+ 53001)).__str__())
+ self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+ 'remote_addr=[2001:db8::1234]:53006>',
+ RequestContext(get_sockaddr('2001:db8::1234',
+ 53006)).__str__())
+
+ #
+ # Invalid parameters (in our expected usage this should not happen
+ # because the sockaddr would come from the Python socket module, but
+ # validation should still be performed correctly)
+ #
+ # not a tuple
+ self.assertRaises(TypeError, RequestContext, 1)
+ # invalid number of parameters
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0, 1)
+ # type error for TSIG
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), tsig=1)
+ # tuple is not in the form of sockaddr
+ self.assertRaises(TypeError, RequestContext, (0, 53))
+ self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 'http'))
+ self.assertRaises(TypeError, RequestContext, ('::', 0, 'flow', 0))
+ # invalid address
+ self.assertRaises(Error, RequestContext, ('example.com', 5300))
+ self.assertRaises(Error, RequestContext, ('192.0.2.1.1', 5300))
+ self.assertRaises(Error, RequestContext, ('2001:db8:::1', 5300))
+
+class RequestACLTest(unittest.TestCase):
+
+ def test_direct_construct(self):
+ self.assertRaises(Error, RequestACL)
+
+ def test_request_loader(self):
+ # these shouldn't raise an exception
+ REQUEST_LOADER.load('[{"action": "DROP"}]')
+ REQUEST_LOADER.load([{"action": "DROP"}])
+ REQUEST_LOADER.load('[{"action": "DROP", "from": "192.0.2.1"}]')
+ REQUEST_LOADER.load([{"action": "DROP", "from": "192.0.2.1"}])
+
+ # Invalid types (note that arguments like '1' or '[]' are of valid
+ # 'type' (but syntax error at a higher level)). So we need to use
+ # something that is not really JSON nor string.
+ self.assertRaises(TypeError, REQUEST_LOADER.load, b'')
+
+ # Incorrect number of arguments
+ self.assertRaises(TypeError, REQUEST_LOADER.load,
+ '[{"action": "DROP"}]', 0)
+
+ def test_bad_acl_syntax(self):
+ # the following are derived from loader_test.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '{}');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, {});
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '42');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 42);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 'true');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, True);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, 'null');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, None);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '"hello"');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, "hello");
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[42]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [42]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '["hello"]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, ["hello"]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[[]]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [[]]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[true]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [True]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[null]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [None]);
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, '[{}]');
+ self.assertRaises(LoaderError, REQUEST_LOADER.load, [{}]);
+
+ # the following are derived from dns_test.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "bad": "192.0.2.1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "bad": "192.0.2.1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": 4}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": 4}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": []}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": []}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": 1}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": 1}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "key": {}}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": {}}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": "bad"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": "bad"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": "bad..name"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "key": "bad..name"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "ACCEPT", "from": null}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "ACCEPT", "from": None}])
+
+ def test_bad_acl_ipsyntax(self):
+ # this test is derived from ip_check_unittest.cc
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43/-1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43/-1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43//1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43//1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.43/1/"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.43/1/"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "/192.0.2.43/1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "/192.0.2.43/1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "2001:db8::/xxxx"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "2001:db8::/xxxx"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "2001:db8::/32/s"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "2001:db8::/32/s"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "1/"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "1/"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "/1"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "/1"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "192.0.2.0/33"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "192.0.2.0/33"}])
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ '[{"action": "DROP", "from": "::1/129"}]')
+ self.assertRaises(LoaderError, REQUEST_LOADER.load,
+ [{"action": "DROP", "from": "::1/129"}])
+
+ def test_execute(self):
+ # tests derived from dns_test.cc. We don't directly expose checks
+ # in the python wrapper, so we test it via execute().
+ self.assertEqual(ACCEPT, get_acl('192.0.2.1').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl_json('192.0.2.1').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.2.53').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.2.53').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl('192.0.2.0/24').execute(CONTEXT4))
+ self.assertEqual(ACCEPT, get_acl_json('192.0.2.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl('192.0.1.0/24').execute(CONTEXT4))
+ self.assertEqual(REJECT, get_acl_json('192.0.1.0/24').execute(CONTEXT4))
+
+ self.assertEqual(ACCEPT, get_acl('2001:db8::1').execute(CONTEXT6))
+ self.assertEqual(ACCEPT, get_acl_json('2001:db8::1').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('2001:db8::53').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl_json('2001:db8::53').execute(CONTEXT6))
+ self.assertEqual(ACCEPT, get_acl('2001:db8::/64').execute(CONTEXT6))
+ self.assertEqual(ACCEPT,
+ get_acl_json('2001:db8::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('2001:db8:1::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT,
+ get_acl_json('2001:db8:1::/64').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl('32.1.13.184').execute(CONTEXT6))
+ self.assertEqual(REJECT, get_acl_json('32.1.13.184').execute(CONTEXT6))
+
+ # TSIG checks, derived from dns_test.cc
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('192.0.2.1',
+ 'badkey.example.com')))
+ self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'key.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(get_context('2001:db8::1',
+ 'badkey.example.com')))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT4))
+ self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+ execute(CONTEXT6))
+ self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+ execute(CONTEXT6))
+
+ # A bit more complicated example, derived from resolver_config_unittest
+ acl = REQUEST_LOADER.load('[ {"action": "ACCEPT", ' +
+ ' "from": "192.0.2.1"},' +
+ ' {"action": "REJECT",' +
+ ' "from": "192.0.2.0/24"},' +
+ ' {"action": "DROP",' +
+ ' "from": "2001:db8::1"},' +
+ '] }')
+ self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
+ self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
+ self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
+ self.assertEqual(REJECT, acl.execute(get_context('2001:db8::2')))
+
+ # same test using the JSON representation
+ acl = REQUEST_LOADER.load([{"action": "ACCEPT", "from": "192.0.2.1"},
+ {"action": "REJECT",
+ "from": "192.0.2.0/24"},
+ {"action": "DROP", "from": "2001:db8::1"}])
+ self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
+ self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
+ self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
+ self.assertEqual(REJECT, acl.execute(get_context('2001:db8::2')))
+
+ def test_bad_execute(self):
+ acl = get_acl('192.0.2.1')
+ # missing parameter
+ self.assertRaises(TypeError, acl.execute)
+ # too many parameters
+ self.assertRaises(TypeError, acl.execute, get_context('192.0.2.2'), 0)
+ # type mismatch
+ self.assertRaises(TypeError, acl.execute, 'bad parameter')
+
+class RequestLoaderTest(unittest.TestCase):
+ # Note: loading ACLs is tested in other test cases.
+
+ def test_construct(self):
+ # at least for now, we don't allow direct construction.
+ self.assertRaises(Error, RequestLoader)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
new file mode 100644
index 0000000..43a7605
--- /dev/null
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -0,0 +1,4 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py sockcreator.py
+pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/__init__.py b/src/lib/python/isc/bind10/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
new file mode 100644
index 0000000..8e5b019
--- /dev/null
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+import os
+import subprocess
+from isc.log_messages.bind10_messages import *
+from libutil_io_python import recv_fd
+
+logger = isc.log.Logger("boss")
+
+"""
+Module that communicates with the privileged socket creator (b10-sockcreator).
+"""
+
+class CreatorError(Exception):
+ """
+ Exception for socket creator related errors.
+
+ It has two members: fatal and errno and they are just holding the values
+ passed to the __init__ function.
+ """
+
+ def __init__(self, message, fatal, errno=None):
+ """
+ Creates the exception. The message argument is the usual string.
+ The fatal one tells if the error is fatal (eg. the creator crashed)
+ and errno is the errno value returned from socket creator, if
+ applicable.
+ """
+ Exception.__init__(self, message)
+ self.fatal = fatal
+ self.errno = errno
+
+class Parser:
+ """
+ This class knows the sockcreator language. It creates commands, sends them
+ and receives the answers and parses them.
+
+ It does not start it, the communication channel must be provided.
+
+ In theory, anything here can throw a fatal CreatorError exception, but it
+ happens only in case something like the creator process crashes. Any other
+ occasions are mentioned explicitly.
+ """
+
+ def __init__(self, creator_socket):
+ """
+ Creates the parser. The creator_socket is socket to the socket creator
+ process that will be used for communication. However, the object must
+ have a read_fd() method to read the file descriptor. This slightly
+ unusual trick with modifying an object is used to ease testing.
+
+ You can use WrappedSocket in production code to add the method to any
+ ordinary socket.
+ """
+ self.__socket = creator_socket
+ logger.info(BIND10_SOCKCREATOR_INIT)
+
+ def terminate(self):
+ """
+ Asks the creator process to terminate and waits for it to close the
+ socket. Does not return anything. Raises a CreatorError if there is
+ still data on the socket, if there is an error closing the socket,
+ or if the socket had already been closed.
+ """
+ if self.__socket is None:
+ raise CreatorError('Terminated already', True)
+ logger.info(BIND10_SOCKCREATOR_TERMINATE)
+ try:
+ self.__socket.sendall(b'T')
+ # Wait for an EOF - it will return empty data
+ eof = self.__socket.recv(1)
+ if len(eof) != 0:
+ raise CreatorError('Protocol error - data after terminated',
+ True)
+ self.__socket = None
+ except socket.error as se:
+ self.__socket = None
+ raise CreatorError(str(se), True)
+
+ def get_socket(self, address, port, socktype):
+ """
+ Asks the socket creator process to create a socket. Pass an address
+ (the isc.net.IPaddr object), port number and socket type (either
+ string "UDP", "TCP" or constant socket.SOCK_DGRAM or
+ socket.SOCK_STREAM.
+
+ Blocks until it is provided by the socket creator process (which
+ should be fast, as it is on localhost) and returns the file descriptor
+ number. It raises a CreatorError exception if the creation fails.
+ """
+ if self.__socket is None:
+ raise CreatorError('Socket requested on terminated creator', True)
+ # First, assemble the request from parts
+ logger.info(BIND10_SOCKET_GET, address, port, socktype)
+ data = b'S'
+ if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
+ data += b'U'
+ elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
+ data += b'T'
+ else:
+ raise ValueError('Unknown socket type: ' + str(socktype))
+ if address.family == socket.AF_INET:
+ data += b'4'
+ elif address.family == socket.AF_INET6:
+ data += b'6'
+ else:
+ raise ValueError('Unknown address family in address')
+ data += struct.pack('!H', port)
+ data += address.addr
+ try:
+ # Send the request
+ self.__socket.sendall(data)
+ answer = self.__socket.recv(1)
+ if answer == b'S':
+ # Success!
+ result = self.__socket.read_fd()
+ logger.info(BIND10_SOCKET_CREATED, result)
+ return result
+ elif answer == b'E':
+ # There was an error, read the error as well
+ error = self.__socket.recv(1)
+ errno = struct.unpack('i',
+ self.__read_all(len(struct.pack('i',
+ 0))))
+ if error == b'S':
+ cause = 'socket'
+ elif error == b'B':
+ cause = 'bind'
+ else:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
+ raise CreatorError('Unknown error cause' + str(answer), True)
+ logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
+ os.strerror(errno[0]))
+ raise CreatorError('Error creating socket on ' + cause, False,
+ errno[0])
+ else:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
+ raise CreatorError('Unknown response ' + str(answer), True)
+ except socket.error as se:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
+ raise CreatorError(str(se), True)
+
+ def __read_all(self, length):
+ """
+ Keeps reading until length data is read or EOF or error happens.
+
+ EOF is considered error as well and throws a CreatorError.
+ """
+ result = b''
+ while len(result) < length:
+ data = self.__socket.recv(length - len(result))
+ if len(data) == 0:
+ self.__socket = None
+ logger.fatal(BIND10_SOCKCREATOR_EOF)
+ raise CreatorError('Unexpected EOF', True)
+ result += data
+ return result
+
+class WrappedSocket:
+ """
+ This class wraps a socket and adds a read_fd method, so it can be used
+ for the Parser class conveniently. It simply copies all its guts into
+ itself and implements the method.
+ """
+ def __init__(self, socket):
+ # Copy whatever can be copied from the socket
+ for name in dir(socket):
+ if name not in ['__class__', '__weakref__']:
+ setattr(self, name, getattr(socket, name))
+ # Keep the socket, so we can prevent it from being garbage-collected
+ # and closed before we are removed ourselves
+ self.__orig_socket = socket
+
+ def read_fd(self):
+ """
+ Read the file descriptor from the socket.
+ """
+ return recv_fd(self.fileno())
+
+# FIXME: Any idea how to test this? Starting an external process doesn't sound
+# OK
+class Creator(Parser):
+ """
+ This starts the socket creator and allows asking for the sockets.
+ """
+ def __init__(self, path):
+ (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
+ # Popen does not like, for some reason, having the same socket for
+ # stdin as well as stdout, so we dup it before passing it there.
+ remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
+ socket.SOCK_STREAM)
+ env = os.environ
+ env['PATH'] = path
+ self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
+ stdin=remote.fileno(),
+ stdout=remote2.fileno())
+ remote.close()
+ remote2.close()
+ Parser.__init__(self, WrappedSocket(local))
+
+ def pid(self):
+ return self.__process.pid
+
+ def kill(self):
+ logger.warn(BIND10_SOCKCREATOR_KILL)
+ if self.__process is not None:
+ self.__process.kill()
+ self.__process = None
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
new file mode 100644
index 0000000..df8ab30
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -0,0 +1,29 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+#PYTESTS = args_test.py bind10_test.py
+# NOTE: this has a generated test found in the builddir
+PYTESTS = sockcreator_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+ BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
+
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
new file mode 100644
index 0000000..4453184
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -0,0 +1,327 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This test file is generated .py.in -> .py just to be in the build dir,
+# same as the rest of the tests. Saves a lot of stuff in makefile.
+
+"""
+Tests for the bind10.sockcreator module.
+"""
+
+import unittest
+import struct
+import socket
+from isc.net.addr import IPAddr
+import isc.log
+from libutil_io_python import send_fd
+from isc.bind10.sockcreator import Parser, CreatorError, WrappedSocket
+
+class FakeCreator:
+ """
+ Class emulating the socket to the socket creator. It can be given expected
+ data to receive (and check) and responses to give to the Parser class
+ during testing.
+ """
+
+ class InvalidPlan(Exception):
+ """
+ Raised when someone wants to recv when sending is planned or vice
+ versa.
+ """
+ pass
+
+ class InvalidData(Exception):
+ """
+ Raised when the data passed to sendall is not the same as expected.
+ """
+ pass
+
+ def __init__(self, plan):
+ """
+ Create the object. The plan variable contains list of expected actions,
+ in form:
+
+ [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
+ ('f', 'File descriptor number to return from read_fd'), ('e',
+ None), ...]
+
+ It modifies the array as it goes.
+ """
+ self.__plan = plan
+
+ def __get_plan(self, expected):
+ if len(self.__plan) == 0:
+ raise InvalidPlan('Nothing more planned')
+ (kind, data) = self.__plan[0]
+ if kind == 'e':
+ self.__plan.pop(0)
+ raise socket.error('False socket error')
+ if kind != expected:
+ raise InvalidPlan('Planned ' + kind + ', but ' + expected +
+ 'requested')
+ return data
+
+ def recv(self, maxsize):
+ """
+ Emulate recv. Returns maxsize bytes from the current recv plan. If
+ there are data left from previous recv call, it is used first.
+
+ If no recv is planned, raises InvalidPlan.
+ """
+ data = self.__get_plan('r')
+ result, rest = data[:maxsize], data[maxsize:]
+ if len(rest) > 0:
+ self.__plan[0] = ('r', rest)
+ else:
+ self.__plan.pop(0)
+ return result
+
+ def read_fd(self):
+ """
+ Emulate the reading of file descriptor. Returns one from a plan.
+
+ It raises InvalidPlan if no socket is planned now.
+ """
+ fd = self.__get_plan('f')
+ self.__plan.pop(0)
+ return fd
+
+ def sendall(self, data):
+ """
+ Checks that the data passed are correct according to plan. It raises
+ InvalidData if the data differs or InvalidPlan when sendall is not
+ expected.
+ """
+ planned = self.__get_plan('s')
+ dlen = len(data)
+ prefix, rest = planned[:dlen], planned[dlen:]
+ if prefix != data:
+ raise InvalidData('Expected "' + str(prefix)+ '", got "' +
+ str(data) + '"')
+ if len(rest) > 0:
+ self.__plan[0] = ('s', rest)
+ else:
+ self.__plan.pop(0)
+
+ def all_used(self):
+ """
+ Returns if the whole plan was consumed.
+ """
+ return len(self.__plan) == 0
+
+class ParserTests(unittest.TestCase):
+ """
+ Testcases for the Parser class.
+
+ A lot of these test could be done by
+ `with self.assertRaises(CreatorError) as cm`. But some versions of python
+ take the scope wrong and don't work, so we use the primitive way of
+ try-except.
+ """
+ def __terminate(self):
+ creator = FakeCreator([('s', b'T'), ('r', b'')])
+ parser = Parser(creator)
+ self.assertEqual(None, parser.terminate())
+ self.assertTrue(creator.all_used())
+ return parser
+
+ def test_terminate(self):
+ """
+ Test if the command to terminate is correct and it waits for reading the
+ EOF.
+ """
+ self.__terminate()
+
+ def __terminate_raises(self, parser):
+ """
+ Check that terminate() raises a fatal exception.
+ """
+ try:
+ parser.terminate()
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_terminate_error1(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one raises an error when receiving the EOF.
+ """
+ creator = FakeCreator([('s', b'T'), ('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error2(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one raises an error when sending data.
+ """
+ creator = FakeCreator([('e', None)])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_error3(self):
+ """
+ Test it reports an exception when there's error terminating the creator.
+ This one sends data when it should have terminated.
+ """
+ creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
+ parser = Parser(creator)
+ self.__terminate_raises(parser)
+
+ def test_terminate_twice(self):
+ """
+ Test we can't terminate twice.
+ """
+ parser = self.__terminate()
+ self.__terminate_raises(parser)
+
+ def test_crash(self):
+ """
+ Tests that the parser correctly raises exception when it crashes
+ unexpectedly.
+ """
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_error(self):
+ """
+ Tests that the parser correctly raises non-fatal exception when
+ the socket can not be created.
+ """
+ # We split the int to see if it can cope with data coming in
+ # different packets
+ intpart = struct.pack('@i', 42)
+ creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
+ intpart[:1]), ('r', intpart[1:])])
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ # Is the exception correct?
+ self.assertFalse(ce.fatal)
+ self.assertEqual(42, ce.errno)
+
+ def __error(self, plan):
+ creator = FakeCreator(plan)
+ parser = Parser(creator)
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(creator.all_used())
+ self.assertTrue(ce.fatal)
+
+ def test_error_send(self):
+ self.__error([('e', None)])
+
+ def test_error_recv(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
+
+ def test_error_read_fd(self):
+ self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
+
+ def __create(self, addr, socktype, encoded):
+ creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
+ parser = Parser(creator)
+ self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
+
+ def test_create1(self):
+ self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
+
+ def test_create2(self):
+ self.__create('2001:db8::', socket.SOCK_STREAM,
+ b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
+
+ def test_create_terminated(self):
+ """
+ Test we can't request sockets after it was terminated.
+ """
+ parser = self.__terminate()
+ try:
+ parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+ self.fail("Not raised")
+ except CreatorError as ce:
+ self.assertTrue(ce.fatal)
+ self.assertEqual(None, ce.errno)
+
+ def test_invalid_socktype(self):
+ """
+ Test invalid socket type is rejected
+ """
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ IPAddr('0.0.0.0'), 42, 'RAW')
+
+ def test_invalid_family(self):
+ """
+ Test it rejects invalid address family.
+ """
+ # Note: this produces a bad logger output, since this address
+ # can not be converted to string, so the original message with
+ # placeholders is output. This should not happen in practice, so
+ # it is harmless.
+ addr = IPAddr('0.0.0.0')
+ addr.family = 42
+ self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+ addr, 42, socket.SOCK_DGRAM)
+
+class WrapTests(unittest.TestCase):
+ """
+ Tests for the wrap_socket function.
+ """
+ def test_wrap(self):
+ # We construct two pairs of socket. The receiving side of one pair will
+ # be wrapped. Then we send one of the other pair through this pair and
+ # check the received one can be used as a socket
+
+ # The transport socket
+ (t1, t2) = socket.socketpair()
+ # The payload socket
+ (p1, p2) = socket.socketpair()
+
+ t2 = WrappedSocket(t2)
+
+ # Transfer the descriptor
+ send_fd(t1.fileno(), p1.fileno())
+ p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
+
+ # Now, pass some data trough the socket
+ p1.send(b'A')
+ data = p2.recv(1)
+ self.assertEqual(b'A', data)
+
+ # Test the wrapping didn't hurt the socket's usual methods
+ t1.send(b'B')
+ data = t2.recv(1)
+ self.assertEqual(b'B', data)
+ t2.send(b'C')
+ data = t1.recv(1)
+ self.assertEqual(b'C', data)
+
+if __name__ == '__main__':
+ isc.log.init("bind10") # FIXME Should this be needed?
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/lib/python/isc/cc/data.py b/src/lib/python/isc/cc/data.py
index ce1bba0..76ef942 100644
--- a/src/lib/python/isc/cc/data.py
+++ b/src/lib/python/isc/cc/data.py
@@ -22,8 +22,22 @@
import json
-class DataNotFoundError(Exception): pass
-class DataTypeError(Exception): pass
+class DataNotFoundError(Exception):
+ """Raised if an identifier does not exist according to a spec file,
+ or if an item is addressed that is not in the current (or default)
+ config (such as a nonexistent list or map element)"""
+ pass
+
+class DataAlreadyPresentError(Exception):
+ """Raised if there is an attemt to add an element to a list or a
+ map that is already present in that list or map (i.e. if 'add'
+ is used when it should be 'set')"""
+ pass
+
+class DataTypeError(Exception):
+ """Raised if there is an attempt to set an element that is of a
+ different type than the type specified in the specification."""
+ pass
def remove_identical(a, b):
"""Removes the values from dict a that are the same as in dict b.
diff --git a/src/lib/python/isc/cc/message.py b/src/lib/python/isc/cc/message.py
index 3601c41..3ebcc43 100644
--- a/src/lib/python/isc/cc/message.py
+++ b/src/lib/python/isc/cc/message.py
@@ -35,7 +35,7 @@ def from_wire(data):
Raises an AttributeError if the given object has no decode()
method (which should return a string).
'''
- return json.loads(data.decode('utf8'))
+ return json.loads(data.decode('utf8'), strict=False)
if __name__ == "__main__":
import doctest
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index fb7dd06..f6b6265 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -93,6 +93,19 @@ class Session:
self._socket.send(msg)
def recvmsg(self, nonblock = True, seq = None):
+ """Reads a message. If nonblock is true, and there is no
+ message to read, it returns (None, None).
+ If seq is not None, it should be a value as returned by
+ group_sendmsg(), in which case only the response to
+ that message is returned, and others will be queued until
+ the next call to this method.
+ If seq is None, only messages that are *not* responses
+ will be returned, and responses will be queued.
+ The queue is checked for relevant messages before data
+ is read from the socket.
+ Raises a SessionError if there is a JSON decode problem in
+ the message that is read, or if the session has been closed
+ prior to the call of recvmsg()"""
with self._lock:
if len(self._queue) > 0:
i = 0;
@@ -109,16 +122,22 @@ class Session:
if data and len(data) > 2:
header_length = struct.unpack('>H', data[0:2])[0]
data_length = len(data) - 2 - header_length
- if data_length > 0:
- env = isc.cc.message.from_wire(data[2:header_length+2])
- msg = isc.cc.message.from_wire(data[header_length + 2:])
- if (seq == None and "reply" not in env) or (seq != None and "reply" in env and seq == env["reply"]):
- return env, msg
+ try:
+ if data_length > 0:
+ env = isc.cc.message.from_wire(data[2:header_length+2])
+ msg = isc.cc.message.from_wire(data[header_length + 2:])
+ if (seq == None and "reply" not in env) or (seq != None and "reply" in env and seq == env["reply"]):
+ return env, msg
+ else:
+ self._queue.append((env,msg))
+ return self.recvmsg(nonblock, seq)
else:
- self._queue.append((env,msg))
- return self.recvmsg(nonblock, seq)
- else:
- return isc.cc.message.from_wire(data[2:header_length+2]), None
+ return isc.cc.message.from_wire(data[2:header_length+2]), None
+ except ValueError as ve:
+ # TODO: when we have logging here, add a debug
+ # message printing the data that we were unable
+ # to parse as JSON
+ raise SessionError(ve)
return None, None
def _receive_bytes(self, size):
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index 4e49501..4c2acc0 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST += test_session.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +23,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+ PYTHONPATH=$(COMMON_PYTHON_PATH) \
BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/cc/tests/message_test.py b/src/lib/python/isc/cc/tests/message_test.py
index 2024201..c417068 100644
--- a/src/lib/python/isc/cc/tests/message_test.py
+++ b/src/lib/python/isc/cc/tests/message_test.py
@@ -31,6 +31,10 @@ class MessageTest(unittest.TestCase):
self.msg2_str = "{\"aaa\": [1, 1.1, true, false, null]}";
self.msg2_wire = self.msg2_str.encode()
+ self.msg3 = { "aaa": [ 1, 1.1, True, False, "string\n" ] }
+ self.msg3_str = "{\"aaa\": [1, 1.1, true, false, \"string\n\" ]}";
+ self.msg3_wire = self.msg3_str.encode()
+
def test_encode_json(self):
self.assertEqual(self.msg1_wire, isc.cc.message.to_wire(self.msg1))
self.assertEqual(self.msg2_wire, isc.cc.message.to_wire(self.msg2))
@@ -40,6 +44,7 @@ class MessageTest(unittest.TestCase):
def test_decode_json(self):
self.assertEqual(self.msg1, isc.cc.message.from_wire(self.msg1_wire))
self.assertEqual(self.msg2, isc.cc.message.from_wire(self.msg2_wire))
+ self.assertEqual(self.msg3, isc.cc.message.from_wire(self.msg3_wire))
self.assertRaises(AttributeError, isc.cc.message.from_wire, 1)
self.assertRaises(ValueError, isc.cc.message.from_wire, b'\x001')
diff --git a/src/lib/python/isc/cc/tests/session_test.py b/src/lib/python/isc/cc/tests/session_test.py
index fe35a6c..772ed0c 100644
--- a/src/lib/python/isc/cc/tests/session_test.py
+++ b/src/lib/python/isc/cc/tests/session_test.py
@@ -274,6 +274,16 @@ class testSession(unittest.TestCase):
self.assertEqual({"hello": "b"}, msg)
self.assertFalse(sess.has_queued_msgs())
+ def test_recv_bad_msg(self):
+ sess = MySession()
+ self.assertFalse(sess.has_queued_msgs())
+ sess._socket.addrecv({'to': 'someone' }, {'hello': 'b'})
+ sess._socket.addrecv({'to': 'someone', 'reply': 1}, {'hello': 'a'})
+ # mangle the bytes a bit
+ sess._socket.recvqueue[5] = sess._socket.recvqueue[5] - 2
+ sess._socket.recvqueue = sess._socket.recvqueue[:-2]
+ self.assertRaises(SessionError, sess.recvmsg, True, 1)
+
def test_next_sequence(self):
sess = MySession()
self.assertEqual(sess._sequence, 1)
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index 1efb6fc..ef696fb 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -1,19 +1,31 @@
SUBDIRS = . tests
python_PYTHON = __init__.py ccsession.py cfgmgr.py config_data.py module_spec.py
-pyexec_DATA = cfgmgr_messages.py
-
pythondir = $(pyexecdir)/isc/config
-# Define rule to build logging source files from message file
-cfgmgr_messages.py: cfgmgr_messages.mes
- $(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/lib/python/isc/config/cfgmgr_messages.mes
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+BUILT_SOURCES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
-CLEANFILES = cfgmgr_messages.py cfgmgr_messages.pyc
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
CLEANDIRS = __pycache__
-EXTRA_DIST = cfgmgr_messages.mes
+EXTRA_DIST = cfgmgr_messages.mes config_messages.mes
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py : cfgmgr_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cfgmgr_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py : config_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/config_messages.mes
clean-local:
rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index bff4f58..d07df1e 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -43,6 +43,9 @@ from isc.util.file import path_search
import bind10_config
from isc.log import log_config_update
import json
+from isc.log_messages.config_messages import *
+
+logger = isc.log.Logger("config")
class ModuleCCSessionError(Exception): pass
@@ -88,6 +91,7 @@ COMMAND_CONFIG_UPDATE = "config_update"
COMMAND_MODULE_SPECIFICATION_UPDATE = "module_specification_update"
COMMAND_GET_COMMANDS_SPEC = "get_commands_spec"
+COMMAND_GET_STATISTICS_SPEC = "get_statistics_spec"
COMMAND_GET_CONFIG = "get_config"
COMMAND_SET_CONFIG = "set_config"
COMMAND_GET_MODULE_SPEC = "get_module_spec"
@@ -127,10 +131,7 @@ def default_logconfig_handler(new_config, config_data):
isc.log.log_config_update(json.dumps(new_config),
json.dumps(config_data.get_module_spec().get_full_spec()))
else:
- # no logging here yet, TODO: log these errors
- print("Error in logging configuration, ignoring config update: ")
- for err in errors:
- print(err)
+ logger.error(CONFIG_LOG_CONFIG_ERRORS, errors)
class ModuleCCSession(ConfigData):
"""This class maintains a connection to the command channel, as
@@ -142,7 +143,7 @@ class ModuleCCSession(ConfigData):
callbacks are called when 'check_command' is called on the
ModuleCCSession"""
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session=None, handle_logging_config=False):
+ def __init__(self, spec_file_name, config_handler, command_handler, cc_session=None, handle_logging_config=True):
"""Initialize a ModuleCCSession. This does *NOT* send the
specification and request the configuration yet. Use start()
for that once the ModuleCCSession has been initialized.
@@ -163,7 +164,7 @@ class ModuleCCSession(ConfigData):
the logger manager to apply it. It will also inform the
logger manager when the logging configuration gets updated.
The module does not need to do anything except intializing
- its loggers, and provide log messages
+ its loggers, and provide log messages. Defaults to true.
"""
module_spec = isc.config.module_spec_from_file(spec_file_name)
ConfigData.__init__(self, module_spec)
@@ -312,7 +313,7 @@ class ModuleCCSession(ConfigData):
module_spec = isc.config.module_spec_from_file(spec_file_name)
module_cfg = ConfigData(module_spec)
module_name = module_spec.get_module_name()
- self._session.group_subscribe(module_name);
+ self._session.group_subscribe(module_name)
# Get the current config for that module now
seq = self._session.group_sendmsg(create_command(COMMAND_GET_CONFIG, { "module_name": module_name }), "ConfigManager")
@@ -327,7 +328,7 @@ class ModuleCCSession(ConfigData):
rcode, value = parse_answer(answer)
if rcode == 0:
if value != None and module_spec.validate_config(False, value):
- module_cfg.set_local_config(value);
+ module_cfg.set_local_config(value)
if config_update_callback is not None:
config_update_callback(value, module_cfg)
@@ -377,7 +378,7 @@ class ModuleCCSession(ConfigData):
if self.get_module_spec().validate_config(False,
value,
errors):
- self.set_local_config(value);
+ self.set_local_config(value)
if self._config_handler:
self._config_handler(value)
else:
@@ -385,8 +386,7 @@ class ModuleCCSession(ConfigData):
"Wrong data in configuration: " +
" ".join(errors))
else:
- # log error
- print("[" + self._module_name + "] Error requesting configuration: " + value)
+ logger.error(CONFIG_GET_FAILED, value)
else:
raise ModuleCCSessionError("No answer from configuration manager")
except isc.cc.SessionTimeout:
@@ -415,8 +415,8 @@ class UIModuleCCSession(MultiConfigData):
self.set_specification(isc.config.ModuleSpec(specs[module]))
def update_specs_and_config(self):
- self.request_specifications();
- self.request_current_config();
+ self.request_specifications()
+ self.request_current_config()
def request_current_config(self):
"""Requests the current configuration from the configuration
@@ -426,47 +426,90 @@ class UIModuleCCSession(MultiConfigData):
raise ModuleCCSessionError("Bad config version")
self._set_current_config(config)
-
- def add_value(self, identifier, value_str = None):
- """Add a value to a configuration list. Raises a DataTypeError
- if the value does not conform to the list_item_spec field
- of the module config data specification. If value_str is
- not given, we add the default as specified by the .spec
- file."""
- module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
-
+ def _add_value_to_list(self, identifier, value, module_spec):
cur_list, status = self.get_value(identifier)
if not cur_list:
cur_list = []
- # Hmm. Do we need to check for duplicates?
- value = None
- if value_str is not None:
- value = isc.cc.data.parse_value_str(value_str)
- else:
+ if value is None:
if "item_default" in module_spec["list_item_spec"]:
value = module_spec["list_item_spec"]["item_default"]
if value is None:
- raise isc.cc.data.DataNotFoundError("No value given and no default for " + str(identifier))
-
+ raise isc.cc.data.DataNotFoundError(
+ "No value given and no default for " + str(identifier))
+
if value not in cur_list:
cur_list.append(value)
self.set_value(identifier, cur_list)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
+
+ def _add_value_to_named_set(self, identifier, value, item_value):
+ if type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " +
+ identifier +
+ " must be a string")
+ # fail on both None and empty string
+ if not value:
+ raise isc.cc.data.DataNotFoundError(
+ "Need a name to add a new item to named_set " +
+ str(identifier))
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value not in cur_map:
+ cur_map[value] = item_value
+ self.set_value(identifier, cur_map)
+ else:
+ raise isc.cc.data.DataAlreadyPresentError(value +
+ " already in "
+ + identifier)
- def remove_value(self, identifier, value_str):
- """Remove a value from a configuration list. The value string
- must be a string representation of the full item. Raises
- a DataTypeError if the value at the identifier is not a list,
- or if the given value_str does not match the list_item_spec
- """
+ def add_value(self, identifier, value_str = None, set_value_str = None):
+ """Add a value to a configuration list. Raises a DataTypeError
+ if the value does not conform to the list_item_spec field
+ of the module config data specification. If value_str is
+ not given, we add the default as specified by the .spec
+ file. Raises a DataNotFoundError if the given identifier
+ is not specified in the specification as a map or list.
+ Raises a DataAlreadyPresentError if the specified element
+ already exists."""
module_spec = self.find_spec_part(identifier)
- if (type(module_spec) != dict or "list_item_spec" not in module_spec):
- raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ # the specified element must be a list or a named_set
+ if 'list_item_spec' in module_spec:
+ value = None
+ # in lists, we might get the value with spaces, making it
+ # the third argument. In that case we interpret both as
+ # one big string meant as the value
+ if value_str is not None:
+ if set_value_str is not None:
+ value_str += set_value_str
+ value = isc.cc.data.parse_value_str(value_str)
+ self._add_value_to_list(identifier, value, module_spec)
+ elif 'named_set_item_spec' in module_spec:
+ item_name = None
+ item_value = None
+ if value_str is not None:
+ item_name = isc.cc.data.parse_value_str(value_str)
+ if set_value_str is not None:
+ item_value = isc.cc.data.parse_value_str(set_value_str)
+ else:
+ if 'item_default' in module_spec['named_set_item_spec']:
+ item_value = module_spec['named_set_item_spec']['item_default']
+ self._add_value_to_named_set(identifier, item_name,
+ item_value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named set")
- if value_str is None:
+ def _remove_value_from_list(self, identifier, value):
+ if value is None:
# we are directly removing an list index
id, list_indices = isc.cc.data.split_identifier_list_indices(identifier)
if list_indices is None:
@@ -474,17 +517,52 @@ class UIModuleCCSession(MultiConfigData):
else:
self.set_value(identifier, None)
else:
- value = isc.cc.data.parse_value_str(value_str)
- isc.config.config_data.check_type(module_spec, [value])
cur_list, status = self.get_value(identifier)
- #if not cur_list:
- # cur_list = isc.cc.data.find_no_exc(self.config.data, identifier)
if not cur_list:
cur_list = []
- if value in cur_list:
+ elif value in cur_list:
cur_list.remove(value)
self.set_value(identifier, cur_list)
+ def _remove_value_from_named_set(self, identifier, value):
+ if value is None:
+ raise isc.cc.data.DataNotFoundError("Need a name to remove an item from named_set " + str(identifier))
+ elif type(value) != str:
+ raise isc.cc.data.DataTypeError("Name for named_set " + identifier + " must be a string")
+ else:
+ cur_map, status = self.get_value(identifier)
+ if not cur_map:
+ cur_map = {}
+ if value in cur_map:
+ del cur_map[value]
+ else:
+ raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
+
+ def remove_value(self, identifier, value_str):
+ """Remove a value from a configuration list or named set.
+ The value string must be a string representation of the full
+ item. Raises a DataTypeError if the value at the identifier
+ is not a list, or if the given value_str does not match the
+ list_item_spec """
+ module_spec = self.find_spec_part(identifier)
+ if module_spec is None:
+ raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+ value = None
+ if value_str is not None:
+ value = isc.cc.data.parse_value_str(value_str)
+
+ if 'list_item_spec' in module_spec:
+ if value is not None:
+ isc.config.config_data.check_type(module_spec['list_item_spec'], value)
+ self._remove_value_from_list(identifier, value)
+ elif 'named_set_item_spec' in module_spec:
+ self._remove_value_from_named_set(identifier, value)
+ else:
+ raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named_set")
+
+
+
def commit(self):
"""Commit all local changes, send them through b10-cmdctl to
the configuration manager"""
@@ -498,7 +576,6 @@ class UIModuleCCSession(MultiConfigData):
self.request_current_config()
self.clear_local_changes()
elif "error" in answer:
- print("Error: " + answer["error"])
- print("Configuration not committed")
+ raise ModuleCCSessionError("Error: " + str(answer["error"]) + "\n" + "Configuration not committed")
else:
raise ModuleCCSessionError("Unknown format of answer in commit(): " + str(answer))
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 83db159..9996a19 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -32,7 +32,7 @@ from isc.config import ccsession, config_data, module_spec
from isc.util.file import path_search
import bind10_config
import isc.log
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
logger = isc.log.Logger("cfgmgr")
@@ -267,6 +267,19 @@ class ConfigManager:
commands[module_name] = self.module_specs[module_name].get_commands_spec()
return commands
+ def get_statistics_spec(self, name = None):
+ """Returns a dict containing 'module_name': statistics_spec for
+ all modules. If name is specified, only that module will
+ be included"""
+ statistics = {}
+ if name:
+ if name in self.module_specs:
+ statistics[name] = self.module_specs[name].get_statistics_spec()
+ else:
+ for module_name in self.module_specs.keys():
+ statistics[module_name] = self.module_specs[module_name].get_statistics_spec()
+ return statistics
+
def read_config(self):
"""Read the current configuration from the file specificied at init()"""
try:
@@ -380,6 +393,9 @@ class ConfigManager:
answer, env = self.cc.group_recvmsg(False, seq)
except isc.cc.SessionTimeout:
answer = ccsession.create_answer(1, "Timeout waiting for answer from " + module_name)
+ except isc.cc.SessionError as se:
+ logger.error(CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE, module_name, se)
+ answer = ccsession.create_answer(1, "Unable to parse response from " + module_name + ": " + str(se))
if answer:
rcode, val = ccsession.parse_answer(answer)
if rcode == 0:
@@ -454,6 +470,8 @@ class ConfigManager:
if cmd:
if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
answer = ccsession.create_answer(0, self.get_commands_spec())
+ elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
+ answer = ccsession.create_answer(0, self.get_statistics_spec())
elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
answer = self._handle_get_module_spec(arg)
elif cmd == ccsession.COMMAND_GET_CONFIG:
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
index 9355e4d..61a63ed 100644
--- a/src/lib/python/isc/config/cfgmgr_messages.mes
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -20,6 +20,13 @@ An older version of the configuration database has been found, from which
there was an automatic upgrade path to the current version. These changes
are now applied, and no action from the administrator is necessary.
+% CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+
% CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1
The configuration manager daemon was unable to connect to the messaging
system. The most likely cause is that msgq is not running.
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index 1efe4a9..fabd37d 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -145,6 +145,8 @@ def _find_spec_part_single(cur_spec, id_part):
return cur_spec['list_item_spec']
# not found
raise isc.cc.data.DataNotFoundError(id + " not found")
+ elif type(cur_spec) == dict and 'named_set_item_spec' in cur_spec.keys():
+ return cur_spec['named_set_item_spec']
elif type(cur_spec) == list:
for cur_spec_item in cur_spec:
if cur_spec_item['item_name'] == id:
@@ -191,11 +193,14 @@ def spec_name_list(spec, prefix="", recurse=False):
result.extend(spec_name_list(map_el['map_item_spec'], prefix + map_el['item_name'], recurse))
else:
result.append(prefix + name)
+ elif 'named_set_item_spec' in spec:
+ # we added a '/' above, but in this one case we don't want it
+ result.append(prefix[:-1])
else:
for name in spec:
result.append(prefix + name + "/")
if recurse:
- result.extend(spec_name_list(spec[name],name, recurse))
+ result.extend(spec_name_list(spec[name], name, recurse))
elif type(spec) == list:
for list_el in spec:
if 'item_name' in list_el:
@@ -207,7 +212,7 @@ def spec_name_list(spec, prefix="", recurse=False):
else:
raise ConfigDataError("Bad specification")
else:
- raise ConfigDataError("Bad specication")
+ raise ConfigDataError("Bad specification")
return result
class ConfigData:
@@ -255,7 +260,7 @@ class ConfigData:
def get_local_config(self):
"""Returns the non-default config values in a dict"""
- return self.data;
+ return self.data
def get_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the full identifiers of
@@ -412,7 +417,39 @@ class MultiConfigData:
item_id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
id_list = module + "/" + id_prefix + "/" + item_id
id_prefix += "/" + id_part
- if list_indices is not None:
+ part_spec = find_spec_part(self._specifications[module].get_config_spec(), id_prefix)
+ if part_spec['item_type'] == 'named_set':
+ # For named sets, the identifier is partly defined
+ # by which values are actually present, and not
+ # purely by the specification.
+ # So if there is a part of the identifier left,
+ # we need to look up the value, then see if that
+ # contains the next part of the identifier we got
+ if len(id_parts) == 0:
+ if 'item_default' in part_spec:
+ return part_spec['item_default']
+ else:
+ return None
+ id_part = id_parts.pop(0)
+
+ named_set_value, type = self.get_value(id_list)
+ if id_part in named_set_value:
+ if len(id_parts) > 0:
+ # we are looking for the *default* value.
+ # so if not present in here, we need to
+ # lookup the one from the spec
+ rest_of_id = "/".join(id_parts)
+ result = isc.cc.data.find_no_exc(named_set_value[id_part], rest_of_id)
+ if result is None:
+ spec_part = self.find_spec_part(identifier)
+ if 'item_default' in spec_part:
+ return spec_part['item_default']
+ return result
+ else:
+ return named_set_value[id_part]
+ else:
+ return None
+ elif list_indices is not None:
# there's actually two kinds of default here for
# lists; they can have a default value (like an
# empty list), but their elements can also have
@@ -449,7 +486,12 @@ class MultiConfigData:
spec = find_spec_part(self._specifications[module].get_config_spec(), id)
if 'item_default' in spec:
- return spec['item_default']
+ # one special case, named_set
+ if spec['item_type'] == 'named_set':
+ print("is " + id_part + " in named set?")
+ return spec['item_default']
+ else:
+ return spec['item_default']
else:
return None
@@ -493,7 +535,7 @@ class MultiConfigData:
spec_part_list = spec_part['list_item_spec']
list_value, status = self.get_value(identifier)
if list_value is None:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
if type(list_value) != list:
# the identifier specified a single element
@@ -509,12 +551,38 @@ class MultiConfigData:
for i in range(len(list_value)):
self._append_value_item(result, spec_part_list, "%s[%d]" % (identifier, i), all)
elif item_type == "map":
+ value, status = self.get_value(identifier)
# just show the specific contents of a map, we are
# almost never interested in just its name
spec_part_map = spec_part['map_item_spec']
self._append_value_item(result, spec_part_map, identifier, all)
+ elif item_type == "named_set":
+ value, status = self.get_value(identifier)
+
+ # show just the one entry, when either the map is empty,
+ # or when this element is not requested specifically
+ if len(value.keys()) == 0:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ {}, status)
+ result.append(entry)
+ elif not first and not all:
+ entry = _create_value_map_entry(identifier,
+ item_type,
+ None, status)
+ result.append(entry)
+ else:
+ spec_part_named_set = spec_part['named_set_item_spec']
+ for entry in value:
+ self._append_value_item(result,
+ spec_part_named_set,
+ identifier + "/" + entry,
+ all)
else:
value, status = self.get_value(identifier)
+ if status == self.NONE and not spec_part['item_optional']:
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
entry = _create_value_map_entry(identifier,
item_type,
value, status)
@@ -569,7 +637,7 @@ class MultiConfigData:
spec_part = spec_part['list_item_spec']
check_type(spec_part, value)
else:
- raise isc.cc.data.DataNotFoundError(identifier)
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
# Since we do not support list diffs (yet?), we need to
# copy the currently set list of items to _local_changes
@@ -579,15 +647,50 @@ class MultiConfigData:
cur_id_part = '/'
for id_part in id_parts:
id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
+ cur_value, status = self.get_value(cur_id_part + id)
+ # Check if the value was there in the first place
+ if status == MultiConfigData.NONE and cur_id_part != "/":
+ raise isc.cc.data.DataNotFoundError(id_part +
+ " not found in " +
+ cur_id_part)
if list_indices is not None:
- cur_list, status = self.get_value(cur_id_part + id)
+ # And check if we don't set something outside of any
+ # list
+ cur_list = cur_value
+ for list_index in list_indices:
+ if list_index >= len(cur_list):
+ raise isc.cc.data.DataNotFoundError("No item " +
+ str(list_index) + " in " + id_part)
+ else:
+ cur_list = cur_list[list_index]
if status != MultiConfigData.LOCAL:
isc.cc.data.set(self._local_changes,
cur_id_part + id,
- cur_list)
+ cur_value)
cur_id_part = cur_id_part + id_part + "/"
isc.cc.data.set(self._local_changes, identifier, value)
-
+
+ def _get_list_items(self, item_name):
+ """This method is used in get_config_item_list, to add list
+ indices and named_set names to the completion list. If
+ the given item_name is for a list or named_set, it'll
+ return a list of those (appended to item_name), otherwise
+ the list will only contain the item_name itself."""
+ spec_part = self.find_spec_part(item_name)
+ if 'item_type' in spec_part and \
+ spec_part['item_type'] == 'named_set':
+ subslash = ""
+ if spec_part['named_set_item_spec']['item_type'] == 'map' or\
+ spec_part['named_set_item_spec']['item_type'] == 'named_set':
+ subslash = "/"
+ values, status = self.get_value(item_name)
+ if len(values) > 0:
+ return [ item_name + "/" + v + subslash for v in values.keys() ]
+ else:
+ return [ item_name ]
+ else:
+ return [ item_name ]
+
def get_config_item_list(self, identifier = None, recurse = False):
"""Returns a list of strings containing the item_names of
the child items at the given identifier. If no identifier is
@@ -598,7 +701,11 @@ class MultiConfigData:
if identifier.startswith("/"):
identifier = identifier[1:]
spec = self.find_spec_part(identifier)
- return spec_name_list(spec, identifier + "/", recurse)
+ spec_list = spec_name_list(spec, identifier + "/", recurse)
+ result_list = []
+ for spec_name in spec_list:
+ result_list.extend(self._get_list_items(spec_name))
+ return result_list
else:
if recurse:
id_list = []
diff --git a/src/lib/python/isc/config/config_messages.mes b/src/lib/python/isc/config/config_messages.mes
new file mode 100644
index 0000000..c52efb4
--- /dev/null
+++ b/src/lib/python/isc/config/config_messages.mes
@@ -0,0 +1,33 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the config_messages python module.
+
+# since these messages are for the python config library, care must
+# be taken that names do not conflict with the messages from the c++
+# config library. A checker script should verify that, but we do not
+# have that at this moment. So when adding a message, make sure that
+# the name is not already used in src/lib/config/config_messages.mes
+
+% CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+
+% CONFIG_GET_FAILED error getting configuration from cfgmgr: %1
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 6171149..b79f928 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -23,6 +23,7 @@
import json
import sys
+import time
import isc.cc.data
@@ -91,7 +92,7 @@ class ModuleSpec:
return _validate_spec_list(data_def, full, data, errors)
else:
# no spec, always bad
- if errors != None:
+ if errors is not None:
errors.append("No config_data specification")
return False
@@ -117,6 +118,26 @@ class ModuleSpec:
return False
+ def validate_statistics(self, full, stat, errors = None):
+ """Check whether the given piece of data conforms to this
+ data definition. If so, it returns True. If not, it will
+ return False. If errors is given, and is an array, a string
+ describing the error will be appended to it. The current
+ version stops as soon as there is one error so this list
+ will not be exhaustive. If 'full' is true, it also errors on
+ non-optional missing values. Set this to False if you want to
+ validate only a part of a statistics tree (like a list of
+ non-default values). Also it checks 'item_format' in case
+ of time"""
+ stat_spec = self.get_statistics_spec()
+ if stat_spec is not None:
+ return _validate_spec_list(stat_spec, full, stat, errors)
+ else:
+ # no spec, always bad
+ if errors is not None:
+ errors.append("No statistics specification")
+ return False
+
def get_module_name(self):
"""Returns a string containing the name of the module as
specified by the specification given at __init__()"""
@@ -152,6 +173,14 @@ class ModuleSpec:
else:
return None
+ def get_statistics_spec(self):
+ """Returns a dict representation of the statistics part of the
+ specification, or None if there is none."""
+ if 'statistics' in self._module_spec:
+ return self._module_spec['statistics']
+ else:
+ return None
+
def __str__(self):
"""Returns a string representation of the full specification"""
return self._module_spec.__str__()
@@ -160,8 +189,9 @@ def _check(module_spec):
"""Checks the full specification. This is a dict that contains the
element "module_spec", which is in itself a dict that
must contain at least a "module_name" (string) and optionally
- a "config_data" and a "commands" element, both of which are lists
- of dicts. Raises a ModuleSpecError if there is a problem."""
+ a "config_data", a "commands" and a "statistics" element, all
+ of which are lists of dicts. Raises a ModuleSpecError if there
+ is a problem."""
if type(module_spec) != dict:
raise ModuleSpecError("data specification not a dict")
if "module_name" not in module_spec:
@@ -173,6 +203,8 @@ def _check(module_spec):
_check_config_spec(module_spec["config_data"])
if "commands" in module_spec:
_check_command_spec(module_spec["commands"])
+ if "statistics" in module_spec:
+ _check_statistics_spec(module_spec["statistics"])
def _check_config_spec(config_data):
# config data is a list of items represented by dicts that contain
@@ -229,7 +261,7 @@ def _check_item_spec(config_item):
item_type = config_item["item_type"]
if type(item_type) != str:
raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+ if item_type not in ["integer", "real", "boolean", "string", "list", "map", "named_set", "any"]:
raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
if "item_optional" in config_item:
if type(config_item["item_optional"]) != bool:
@@ -263,39 +295,96 @@ def _check_item_spec(config_item):
if type(map_item) != dict:
raise ModuleSpecError("map_item_spec element is not a dict")
_check_item_spec(map_item)
+ if 'item_format' in config_item and 'item_default' in config_item:
+ item_format = config_item["item_format"]
+ item_default = config_item["item_default"]
+ if not _check_format(item_default, item_format):
+ raise ModuleSpecError(
+ "Wrong format for " + str(item_default) + " in " + str(item_name))
+def _check_statistics_spec(statistics):
+ # statistics is a list of items represented by dicts that contain
+ # things like "item_name", depending on the type they can have
+ # specific subitems
+ """Checks a list that contains the statistics part of the
+ specification. Raises a ModuleSpecError if there is a
+ problem."""
+ if type(statistics) != list:
+ raise ModuleSpecError("statistics is of type " + str(type(statistics))
+ + ", not a list of items")
+ for stat_item in statistics:
+ _check_item_spec(stat_item)
+ # Additionally checks if there are 'item_title' and
+ # 'item_description'
+ for item in [ 'item_title', 'item_description' ]:
+ if item not in stat_item:
+ raise ModuleSpecError("no " + item + " in statistics item")
+
+def _check_format(value, format_name):
+ """Check if specified value and format are correct. Return True if
+ it is correct."""
+ # TODO: should be added other format types if necessary
+ time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+ 'date' : "%Y-%m-%d",
+ 'time' : "%H:%M:%S" }
+ for fmt in time_formats:
+ if format_name == fmt:
+ try:
+ # reverse check
+ return value == time.strftime(
+ time_formats[fmt],
+ time.strptime(value, time_formats[fmt]))
+ except (ValueError, TypeError):
+ break
+ return False
def _validate_type(spec, value, errors):
"""Returns true if the value is of the correct type given the
specification"""
data_type = spec['item_type']
if data_type == "integer" and type(value) != int:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be an integer")
return False
elif data_type == "real" and type(value) != float:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a real")
return False
elif data_type == "boolean" and type(value) != bool:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a boolean")
return False
elif data_type == "string" and type(value) != str:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a string")
return False
elif data_type == "list" and type(value) != list:
- if errors != None:
+ if errors is not None:
errors.append(str(value) + " should be a list")
return False
elif data_type == "map" and type(value) != dict:
+ if errors is not None:
+ errors.append(str(value) + " should be a map")
+ return False
+ elif data_type == "named_set" and type(value) != dict:
if errors != None:
errors.append(str(value) + " should be a map")
return False
else:
return True
+def _validate_format(spec, value, errors):
+ """Returns true if the value is of the correct format given the
+ specification. Also returns True if the spec has no 'item_format'."""
+ if "item_format" in spec:
+ item_format = spec['item_format']
+ if not _check_format(value, item_format):
+ if errors is not None:
+ errors.append("format type of " + str(value)
+ + " should be " + item_format)
+ return False
+ return True
+
def _validate_item(spec, full, data, errors):
if not _validate_type(spec, data, errors):
return False
@@ -304,12 +393,24 @@ def _validate_item(spec, full, data, errors):
for data_el in data:
if not _validate_type(list_spec, data_el, errors):
return False
+ if not _validate_format(list_spec, data_el, errors):
+ return False
if list_spec['item_type'] == "map":
if not _validate_item(list_spec, full, data_el, errors):
return False
elif type(data) == dict:
- if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
- return False
+ if 'map_item_spec' in spec:
+ if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
+ return False
+ else:
+ named_set_spec = spec['named_set_item_spec']
+ for data_el in data.values():
+ if not _validate_type(named_set_spec, data_el, errors):
+ return False
+ if not _validate_item(named_set_spec, full, data_el, errors):
+ return False
+ elif not _validate_format(spec, data, errors):
+ return False
return True
def _validate_spec(spec, full, data, errors):
@@ -321,7 +422,7 @@ def _validate_spec(spec, full, data, errors):
elif item_name in data:
return _validate_item(spec, full, data[item_name], errors)
elif full and not item_optional:
- if errors != None:
+ if errors is not None:
errors.append("non-optional item " + item_name + " missing")
return False
else:
@@ -346,7 +447,7 @@ def _validate_spec_list(module_spec, full, data, errors):
if spec_item["item_name"] == item_name:
found = True
if not found and item_name != "version":
- if errors != None:
+ if errors is not None:
errors.append("unknown item " + item_name)
validated = False
return validated
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 47ccc41..6670ee7 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST += unittest_fakesession.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -21,7 +21,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/config \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index 830cbd7..351c8e6 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -23,6 +23,7 @@ from isc.config.ccsession import *
from isc.config.config_data import BIND10_CONFIG_DATA_VERSION
from unittest_fakesession import FakeModuleCCSession, WouldBlockForever
import bind10_config
+import isc.log
class TestHelperFunctions(unittest.TestCase):
def test_parse_answer(self):
@@ -107,8 +108,11 @@ class TestModuleCCSession(unittest.TestCase):
def spec_file(self, file):
return self.data_path + os.sep + file
- def create_session(self, spec_file_name, config_handler = None, command_handler = None, cc_session = None):
- return ModuleCCSession(self.spec_file(spec_file_name), config_handler, command_handler, cc_session)
+ def create_session(self, spec_file_name, config_handler = None,
+ command_handler = None, cc_session = None):
+ return ModuleCCSession(self.spec_file(spec_file_name),
+ config_handler, command_handler,
+ cc_session, False)
def test_init(self):
fake_session = FakeModuleCCSession()
@@ -691,6 +695,12 @@ class TestUIModuleCCSession(unittest.TestCase):
fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
return UIModuleCCSession(fake_conn)
+ def create_uccs_named_set(self, fake_conn):
+ module_spec = isc.config.module_spec_from_file(self.spec_file("spec32.spec"))
+ fake_conn.set_get_answer('/module_spec', { module_spec.get_module_name(): module_spec.get_full_spec()})
+ fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
+ return UIModuleCCSession(fake_conn)
+
def test_init(self):
fake_conn = fakeUIConn()
fake_conn.set_get_answer('/module_spec', {})
@@ -711,12 +721,14 @@ class TestUIModuleCCSession(unittest.TestCase):
def test_add_remove_value(self):
fake_conn = fakeUIConn()
uccs = self.create_uccs2(fake_conn)
+
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "Spec2/item1", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, 1, "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "no_such_item", "a")
self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "Spec2/item1", "a")
+
self.assertEqual({}, uccs._local_changes)
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['a', 'b', 'foo']}}, uccs._local_changes)
@@ -726,10 +738,37 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs.remove_value("Spec2/item5", "foo")
uccs.add_value("Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
- uccs.add_value("Spec2/item5", "foo")
+ self.assertRaises(isc.cc.data.DataAlreadyPresentError,
+ uccs.add_value, "Spec2/item5", "foo")
self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "Spec2/item5[123]", None)
uccs.remove_value("Spec2/item5[0]", None)
self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
+ uccs.add_value("Spec2/item5", None);
+ self.assertEqual({'Spec2': {'item5': ['']}}, uccs._local_changes)
+
+ def test_add_remove_value_named_set(self):
+ fake_conn = fakeUIConn()
+ uccs = self.create_uccs_named_set(fake_conn)
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2}, value)
+ uccs.add_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
+
+ uccs.remove_value("/Spec32/named_set_item", "a")
+ uccs.remove_value("/Spec32/named_set_item", "foo")
+ value, status = uccs.get_value("/Spec32/named_set_item")
+ self.assertEqual({'b': 2}, value)
+
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.set_value,
+ "/Spec32/named_set_item/no_such_item",
+ 4)
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ uccs.remove_value, "/Spec32/named_set_item",
+ "no_such_item")
def test_commit(self):
fake_conn = fakeUIConn()
@@ -739,5 +778,6 @@ class TestUIModuleCCSession(unittest.TestCase):
uccs.commit()
if __name__ == '__main__':
+ isc.log.init("bind10")
unittest.main()
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 0a9e2d3..eacc425 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -219,6 +219,25 @@ class TestConfigManager(unittest.TestCase):
commands_spec = self.cm.get_commands_spec('Spec2')
self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
+ def test_get_statistics_spec(self):
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, {})
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec, { 'Spec1': None })
+ self.cm.remove_module_spec('Spec1')
+ module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.cm.set_module_spec(module_spec)
+ self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ statistics_spec = self.cm.get_statistics_spec()
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+ statistics_spec = self.cm.get_statistics_spec('Spec2')
+ self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+
def test_read_config(self):
self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
self.cm.read_config()
@@ -241,6 +260,7 @@ class TestConfigManager(unittest.TestCase):
self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
#self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
@@ -329,6 +349,7 @@ class TestConfigManager(unittest.TestCase):
{ "module_name" : "Spec2" } ] },
{ 'result': [ 0, self.spec.get_full_spec() ] })
self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
+ self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
# re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
#self.assertEqual(len(self.fake_session.message_queue), 2)
# the name here is actually wrong (and hardcoded), but needed in the current version
@@ -450,6 +471,7 @@ class TestConfigManager(unittest.TestCase):
def test_run(self):
self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
+ self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
self.cm.run()
pass
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index fc1bffa..0dd441d 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -236,6 +236,7 @@ class TestConfigData(unittest.TestCase):
value, default = self.cd.get_value("item6/value2")
self.assertEqual(None, value)
self.assertEqual(False, default)
+ self.assertRaises(isc.cc.data.DataNotFoundError, self.cd.get_value, "item6/no_such_item")
def test_get_default_value(self):
self.assertEqual(1, self.cd.get_default_value("item1"))
@@ -360,7 +361,7 @@ class TestMultiConfigData(unittest.TestCase):
def test_get_current_config(self):
cf = { 'module1': { 'item1': 2, 'item2': True } }
- self.mcd._set_current_config(cf);
+ self.mcd._set_current_config(cf)
self.assertEqual(cf, self.mcd.get_current_config())
def test_get_local_changes(self):
@@ -421,6 +422,17 @@ class TestMultiConfigData(unittest.TestCase):
value = self.mcd.get_default_value("Spec2/no_such_item/asdf")
self.assertEqual(None, value)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ value = self.mcd.get_default_value("Spec32/named_set_item")
+ self.assertEqual({ 'a': 1, 'b': 2}, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/a")
+ self.assertEqual(1, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/b")
+ self.assertEqual(2, value)
+ value = self.mcd.get_default_value("Spec32/named_set_item/no_such_item")
+ self.assertEqual(None, value)
+
def test_get_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -544,6 +556,29 @@ class TestMultiConfigData(unittest.TestCase):
maps = self.mcd.get_value_maps("/Spec22/value9")
self.assertEqual(expected, maps)
+ def test_get_value_maps_named_set(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ maps = self.mcd.get_value_maps()
+ self.assertEqual([{'default': False, 'type': 'module',
+ 'name': 'Spec32', 'value': None,
+ 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False},
+ {'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/a")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False}], maps)
+ maps = self.mcd.get_value_maps("/Spec32/named_set_item/b")
+ self.assertEqual([{'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False}], maps)
+
def test_set_value(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
self.mcd.set_specification(module_spec)
@@ -582,6 +617,24 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list("Spec2", True)
self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
+ def test_get_config_item_list_named_set(self):
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual([], config_items)
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ config_items = self.mcd.get_config_item_list()
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, False)
+ self.assertEqual(['Spec32'], config_items)
+ config_items = self.mcd.get_config_item_list(None, True)
+ self.assertEqual(['Spec32/named_set_item'], config_items)
+ self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
+ config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
+ self.assertEqual(['Spec32/named_set_item/aaaa',
+ 'Spec32/named_set_item/aabb',
+ 'Spec32/named_set_item/bbbb',
+ ], config_items)
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index a4dcdec..fc53d23 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -81,6 +81,11 @@ class TestModuleSpec(unittest.TestCase):
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec20.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec21.spec")
self.assertRaises(ModuleSpecError, self.read_spec_file, "spec26.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec34.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec35.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec36.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec37.spec")
+ self.assertRaises(ModuleSpecError, self.read_spec_file, "spec38.spec")
def validate_data(self, specfile_name, datafile_name):
dd = self.read_spec_file(specfile_name);
@@ -98,6 +103,9 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(True, self.validate_data("spec22.spec", "data22_6.data"))
self.assertEqual(True, self.validate_data("spec22.spec", "data22_7.data"))
self.assertEqual(False, self.validate_data("spec22.spec", "data22_8.data"))
+ self.assertEqual(True, self.validate_data("spec32.spec", "data32_1.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_2.data"))
+ self.assertEqual(False, self.validate_data("spec32.spec", "data32_3.data"))
def validate_command_params(self, specfile_name, datafile_name, cmd_name):
dd = self.read_spec_file(specfile_name);
@@ -120,6 +128,17 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd1'))
self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd2'))
+ def test_statistics_validation(self):
+ def _validate_stat(specfile_name, datafile_name):
+ dd = self.read_spec_file(specfile_name);
+ data_file = open(self.spec_file(datafile_name))
+ data_str = data_file.read()
+ data = isc.cc.data.parse_value_str(data_str)
+ return dd.validate_statistics(True, data, [])
+ self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None));
+ self.assertTrue(_validate_stat("spec33.spec", "data33_1.data"))
+ self.assertFalse(_validate_stat("spec33.spec", "data33_2.data"))
+
def test_init(self):
self.assertRaises(ModuleSpecError, ModuleSpec, 1)
module_spec = isc.config.module_spec_from_file(self.spec_file("spec1.spec"), False)
@@ -266,6 +285,80 @@ class TestModuleSpec(unittest.TestCase):
}
)
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date-time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "date"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': 1,
+ 'item_format': "time"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_datetime",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27T19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_date",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "2011-05-27",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+ { 'item_name': "a_time",
+ 'item_type': "string",
+ 'item_optional': False,
+ 'item_default': "19:42:57Z",
+ 'item_format': "dummy-format"
+ }
+ )
+
+ def test_check_format(self):
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'date-time'))
+ self.assertTrue(isc.config.module_spec._check_format('2011-05-27', 'date'))
+ self.assertTrue(isc.config.module_spec._check_format('19:42:57', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57', 'dummy'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99T99:99:99Z', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format('2011-13-99', 'date'))
+ self.assertFalse(isc.config.module_spec._check_format('99:99:99', 'time'))
+ self.assertFalse(isc.config.module_spec._check_format('', 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, 'date-time'))
+ self.assertFalse(isc.config.module_spec._check_format(None, None))
+ # wrong date-time-type format not ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57', 'date-time'))
+ # wrong date-type format ending with "T"
+ self.assertFalse(isc.config.module_spec._check_format('2011-05-27T', 'date'))
+ # wrong time-type format ending with "Z"
+ self.assertFalse(isc.config.module_spec._check_format('19:42:57Z', 'time'))
+
def test_validate_type(self):
errors = []
self.assertEqual(True, isc.config.module_spec._validate_type({ 'item_type': 'integer' }, 1, errors))
@@ -303,6 +396,25 @@ class TestModuleSpec(unittest.TestCase):
self.assertEqual(False, isc.config.module_spec._validate_type({ 'item_type': 'map' }, 1, errors))
self.assertEqual(['1 should be a map'], errors)
+ def test_validate_format(self):
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "2011-05-27T19:42:57Z", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", errors))
+ self.assertEqual(['format type of a should be date-time'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "2011-05-27", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", errors))
+ self.assertEqual(['format type of a should be date'], errors)
+
+ errors = []
+ self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "19:42:57", errors))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", None))
+ self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", errors))
+ self.assertEqual(['format type of a should be time'], errors)
+
def test_validate_spec(self):
spec = { 'item_name': "an_item",
'item_type': "string",
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 46fb661..07fb417 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -1,10 +1,44 @@
SUBDIRS = . tests
+# old data, should be removed in the near future once conversion is done
+pythondir = $(pyexecdir)/isc/datasrc
python_PYTHON = __init__.py master.py sqlite3_ds.py
-pythondir = $(pyexecdir)/isc/datasrc
+
+# new data
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(SQLITE_CFLAGS)
+
+python_LTLIBRARIES = datasrc.la
+datasrc_la_SOURCES = datasrc.cc datasrc.h
+datasrc_la_SOURCES += client_python.cc client_python.h
+datasrc_la_SOURCES += iterator_python.cc iterator_python.h
+datasrc_la_SOURCES += finder_python.cc finder_python.h
+datasrc_la_SOURCES += updater_python.cc updater_python.h
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+datasrc_la_SOURCES += ${top_srcdir}/src/lib/datasrc/sqlite3_accessor.cc
+
+datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
+datasrc_la_LDFLAGS += -module
+datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
+datasrc_la_LIBADD += $(PYTHON_LIB)
+#datasrc_la_LIBADD += $(SQLITE_LIBS)
+
+EXTRA_DIST = client_inc.cc
+EXTRA_DIST += finder_inc.cc
+EXTRA_DIST += iterator_inc.cc
+EXTRA_DIST += updater_inc.cc
CLEANDIRS = __pycache__
clean-local:
rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0e1e481..0b4ed98 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,2 +1,21 @@
-from isc.datasrc.master import *
+import sys
+import os
+
+# this setup is a temporary workaround to deal with the problem of
+# having both 'normal' python modules and a wrapper module
+# Once all programs use the new interface, we should remove the
+# old, and the setup can be made similar to that of the log wrappers.
+intree = False
+for base in sys.path[:]:
+ datasrc_libdir = os.path.join(base, 'isc/datasrc/.libs')
+ if os.path.exists(datasrc_libdir):
+ sys.path.insert(0, datasrc_libdir)
+ intree = True
+
+if intree:
+ from datasrc import *
+else:
+ from isc.datasrc.datasrc import *
from isc.datasrc.sqlite3_ds import *
+from isc.datasrc.master import *
+
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
new file mode 100644
index 0000000..1eba488
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -0,0 +1,157 @@
+namespace {
+
+const char* const DataSourceClient_doc = "\
+The base class of data source clients.\n\
+\n\
+This is the python wrapper for the abstract base class that defines\n\
+the common interface for various types of data source clients. A data\n\
+source client is a top level access point to a data source, allowing \n\
+various operations on the data source such as lookups, traversing or \n\
+updates. The client class itself has limited focus and delegates \n\
+the responsibility for these specific operations to other (c++) classes;\n\
+in general methods of this class act as factories of these other classes.\n\
+\n\
+- InMemoryClient: A client of a conceptual data source that stores all\n\
+ necessary data in memory for faster lookups\n\
+- DatabaseClient: A client that uses a real database backend (such as\n\
+ an SQL database). It would internally hold a connection to the\n\
+ underlying database system.\n\
+\n\
+It is intentional that the names of these derived classes don't\n\
+contain \"DataSource\", unlike their base class. It's also noteworthy\n\
+that the naming of the base class is somewhat redundant because the\n\
+namespace datasrc would indicate that it's related to a data source.\n\
+The redundant naming comes from the observation that namespaces are\n\
+often omitted with using directives, in which case \"Client\" would be\n\
+too generic. On the other hand, concrete derived classes are generally\n\
+not expected to be referenced directly from other modules and\n\
+applications, so we'll give them more concise names such as\n\
+InMemoryClient. A single DataSourceClient object is expected to handle\n\
+only a single RR class even if the underlying data source contains\n\
+records for multiple RR classes. Likewise, (when we support views) a\n\
+DataSourceClient object is expected to handle only a single view.\n\
+\n\
+If the application uses multiple threads, each thread will need to\n\
+create and use a separate DataSourceClient. This is because some\n\
+database backends don't allow multiple threads to share the same\n\
+connection to the database.\n\
+\n\
+For a client using an in memory backend, this may result in having\n\
+multiple copies of the same data in memory, increasing the memory\n\
+footprint substantially. Depending on how to support multiple CPU\n\
+cores for concurrent lookups on the same single data source (which is\n\
+not fully fixed yet, and for which multiple threads may be used), this\n\
+design may have to be revisited. This class (and therefore its derived\n\
+classes) are not copyable. This is because the derived classes would\n\
+generally contain attributes that are not easy to copy (such as a\n\
+large size of in memory data or a network connection to a database\n\
+server). In order to avoid a surprising disruption with a naive copy\n\
+it's prohibited explicitly. For the expected usage of the client\n\
+classes the restriction should be acceptable.\n\
+\n\
+Todo: This class is still not complete. It will need more factory\n\
+methods, e.g. for (re)loading a zone.\n\
+";
+
+const char* const DataSourceClient_findZone_doc = "\
+find_zone(name) -> (code, ZoneFinder)\n\
+\n\
+Returns a ZoneFinder for a zone that best matches the given name.\n\
+\n\
+code: The result code of the operation (integer).\n\
+- DataSourceClient.SUCCESS: A zone that gives an exact match is found\n\
+- DataSourceClient.PARTIALMATCH: A zone whose origin is a super domain of name\n\
+ is found (but there is no exact match)\n\
+- DataSourceClient.NOTFOUND: For all other cases.\n\
+ZoneFinder: ZoneFinder object for the found zone if one is found;\n\
+otherwise None.\n\
+\n\
+Any internal error will be raised as an isc.datasrc.Error exception\n\
+\n\
+Parameters:\n\
+ name A domain name for which the search is performed.\n\
+\n\
+Return Value(s): A tuple containing a result value and a ZoneFinder object or\n\
+None\n\
+";
+
+const char* const DataSourceClient_getIterator_doc = "\
+get_iterator(name) -> ZoneIterator\n\
+\n\
+Returns an iterator to the given zone.\n\
+\n\
+This allows for traversing the whole zone. The returned object can\n\
+provide the RRsets one by one.\n\
+\n\
+This throws isc.datasrc.Error when the zone does not exist in the\n\
+datasource, or when an internal error occurs.\n\
+\n\
+The default implementation throws isc.datasrc.NotImplemented. This allows for\n\
+easy and fast deployment of minimal custom data sources, where the\n\
+user/implementer doesn't have to care about anything else but the\n\
+actual queries. Also, in some cases, it isn't possible to traverse the\n\
+zone from a logical point of view (e.g. dynamically generated zone data).\n\
+\n\
+It is not specified whether a concrete implementation of this method can throw\n\
+anything else.\n\
+\n\
+Parameters:\n\
+ isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
+ nearest match as find_zone.\n\
+\n\
+Return Value(s): Pointer to the iterator.\n\
+";
+
+const char* const DataSourceClient_getUpdater_doc = "\
+get_updater(name, replace) -> ZoneUpdater\n\
+\n\
+Return an updater to make updates to a specific zone.\n\
+\n\
+The RR class of the zone is the one that the client is expected to\n\
+handle (see the detailed description of this class).\n\
+\n\
+If the specified zone is not found via the client, a NULL pointer will\n\
+be returned; in other words a completely new zone cannot be created\n\
+using an updater. It must be created beforehand (even if it's an empty\n\
+placeholder) in a way specific to the underlying data source.\n\
+\n\
+Conceptually, the updater will trigger a separate transaction for\n\
+subsequent updates to the zone within the context of the updater (the\n\
+actual implementation of the \"transaction\" may vary for the specific\n\
+underlying data source). Until commit() is performed on the updater,\n\
+the intermediate updates won't affect the results of other methods\n\
+(and the result of the object's methods created by other factory\n\
+methods). Likewise, if the updater is destructed without performing\n\
+commit(), the intermediate updates will be effectively canceled and\n\
+will never affect other methods.\n\
+\n\
+If the underlying data source allows concurrent updates, this method\n\
+can be called multiple times while the previously returned updater(s)\n\
+are still active. In this case each updater triggers a different\n\
+\"transaction\". Normally it would be for different zones for such a\n\
+case as handling multiple incoming AXFR streams concurrently, but this\n\
+interface does not even prohibit an attempt of getting more than one\n\
+updater for the same zone, as long as the underlying data source\n\
+allows such an operation (and any conflict resolution is left to the\n\
+specific implementation).\n\
+\n\
+If replace is true, any existing RRs of the zone will be deleted on\n\
+successful completion of updates (after commit() on the updater); if\n\
+it's false, the existing RRs will be intact unless explicitly deleted\n\
+by delete_rrset() on the updater.\n\
+\n\
+A data source can be \"read only\" or can prohibit partial updates. In\n\
+such cases this method will result in an isc.datasrc.NotImplemented exception\n\
+(unconditionally, or when replace is false).\n\
+\n\
+Exceptions:\n\
+    isc.datasrc.NotImplemented The underlying data source does not support\n\
+ updates.\n\
+ isc.datasrc.Error Internal error in the underlying data source.\n\
+\n\
+Parameters:\n\
+ name The zone name to be updated\n\
+ replace Whether to delete existing RRs before making updates\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
new file mode 100644
index 0000000..984eabf
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -0,0 +1,264 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "client_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_DataSourceClient : public PyObject {
+public:
+ s_DataSourceClient() : cppobj(NULL) {};
+ DataSourceClient* cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_DataSourceClient, DataSourceClient>
+ DataSourceClientContainer;
+
+PyObject*
+DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
+ try {
+ DataSourceClient::FindResult find_result(
+ self->cppobj->findZone(PyName_ToName(name)));
+
+ result::Result r = find_result.code;
+ ZoneFinderPtr zfp = find_result.zone_finder;
+ // Use N instead of O so refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createZoneFinderObject(zfp)));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createZoneIteratorObject(
+ self->cppobj->getIterator(PyName_ToName(name_obj))));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ PyObject *replace_obj;
+ if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
+ PyBool_Check(replace_obj)) {
+ bool replace = (replace_obj != Py_False);
+ try {
+ return (createZoneUpdaterObject(
+ self->cppobj->getUpdater(PyName_ToName(name_obj),
+ replace)));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef DataSourceClient_methods[] = {
+ { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
+ METH_VARARGS, DataSourceClient_findZone_doc },
+ { "get_iterator",
+ reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator_doc },
+ { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+ // TODO: we should use the factory function which hasn't been written
+ // yet. For now we hardcode the sqlite3 initialization, and pass it one
+ // string for the database file. (similar to how the 'old direct'
+ // sqlite3_ds code works)
+ try {
+ char* db_file_name;
+ if (PyArg_ParseTuple(args, "s", &db_file_name)) {
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(db_file_name, isc::dns::RRClass::IN()));
+ self->cppobj = new DatabaseClient(isc::dns::RRClass::IN(),
+ sqlite3_accessor);
+ return (0);
+ } else {
+ return (-1);
+ }
+
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct DataSourceClient object: " +
+ string(ex.what());
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing DataSourceClient");
+ return (-1);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to DataSourceClient constructor");
+
+ return (-1);
+}
+
+void
+DataSourceClient_destroy(s_DataSourceClient* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_DataSourceClient
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject datasourceclient_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.DataSourceClient",
+ sizeof(s_DataSourceClient), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ DataSourceClient_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ DataSourceClient_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/client_python.h b/src/lib/python/isc/datasrc/client_python.h
new file mode 100644
index 0000000..b20fb6b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_CLIENT_H
+#define __PYTHON_DATASRC_CLIENT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject datasourceclient_type;
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
new file mode 100644
index 0000000..4b0324a
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+
+#include <util/python/pycppwrapper_util.h>
+#include <dns/python/pydnspp_common.h>
+
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyObject*
+getDataSourceException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* datasrc_module = PyImport_AddModule("isc.datasrc");
+ if (datasrc_module != NULL) {
+ PyObject* datasrc_dict = PyModule_GetDict(datasrc_module);
+ if (datasrc_dict != NULL) {
+ ex_obj = PyDict_GetItemString(datasrc_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+
+} // end namespace python
+} // end namespace datasrc
+} // end namespace isc
+
+namespace {
+
+bool
+initModulePart_DataSourceClient(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&datasourceclient_type) < 0) {
+ return (false);
+ }
+ void* dscp = &datasourceclient_type;
+ if (PyModule_AddObject(mod, "DataSourceClient", static_cast<PyObject*>(dscp)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&datasourceclient_type);
+
+ addClassVariable(datasourceclient_type, "SUCCESS",
+ Py_BuildValue("I", result::SUCCESS));
+ addClassVariable(datasourceclient_type, "EXIST",
+ Py_BuildValue("I", result::EXIST));
+ addClassVariable(datasourceclient_type, "NOTFOUND",
+ Py_BuildValue("I", result::NOTFOUND));
+ addClassVariable(datasourceclient_type, "PARTIALMATCH",
+ Py_BuildValue("I", result::PARTIALMATCH));
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneFinder(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zonefinder_type) < 0) {
+ return (false);
+ }
+ void* zip = &zonefinder_type;
+ if (PyModule_AddObject(mod, "ZoneFinder", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zonefinder_type);
+
+ addClassVariable(zonefinder_type, "SUCCESS",
+ Py_BuildValue("I", ZoneFinder::SUCCESS));
+ addClassVariable(zonefinder_type, "DELEGATION",
+ Py_BuildValue("I", ZoneFinder::DELEGATION));
+ addClassVariable(zonefinder_type, "NXDOMAIN",
+ Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+ addClassVariable(zonefinder_type, "NXRRSET",
+ Py_BuildValue("I", ZoneFinder::NXRRSET));
+ addClassVariable(zonefinder_type, "CNAME",
+ Py_BuildValue("I", ZoneFinder::CNAME));
+ addClassVariable(zonefinder_type, "DNAME",
+ Py_BuildValue("I", ZoneFinder::DNAME));
+
+ addClassVariable(zonefinder_type, "FIND_DEFAULT",
+ Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+ addClassVariable(zonefinder_type, "FIND_GLUE_OK",
+ Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+ addClassVariable(zonefinder_type, "FIND_DNSSEC",
+ Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneIterator(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneiterator_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneiterator_type;
+ if (PyModule_AddObject(mod, "ZoneIterator", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneiterator_type);
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneUpdater(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneupdater_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneupdater_type;
+ if (PyModule_AddObject(mod, "ZoneUpdater", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneupdater_type);
+
+ return (true);
+}
+
+
+PyObject* po_DataSourceError;
+PyObject* po_NotImplemented;
+
+PyModuleDef iscDataSrc = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "datasrc",
+ "Python bindings for the classes in the isc::datasrc namespace.\n\n"
+ "These bindings are close match to the C++ API, but they are not complete "
+ "(some parts are not needed) and some are done in more python-like ways.",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_datasrc(void) {
+ PyObject* mod = PyModule_Create(&iscDataSrc);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_DataSourceClient(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneFinder(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneIterator(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneUpdater(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ try {
+ po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
+ NULL);
+ PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
+ NULL, NULL);
+ PyObjectContainer(po_NotImplemented).installToModule(mod,
+ "NotImplemented");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
diff --git a/src/lib/python/isc/datasrc/datasrc.h b/src/lib/python/isc/datasrc/datasrc.h
new file mode 100644
index 0000000..d82881b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.h
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_H
+#define __PYTHON_DATASRC_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.datasrc.datasrc loadable module.
+//
+// Since the datasrc module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access
+// C/C++ symbols defined in that module. So we get access to these objects
+// using the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.datasrc has been loaded by the
+// time this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getDataSourceException(const char* ex_name);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
+#endif // __PYTHON_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
new file mode 100644
index 0000000..2b47d02
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -0,0 +1,96 @@
+namespace {
+const char* const ZoneFinder_doc = "\
+The base class to search a zone for RRsets.\n\
+\n\
+The ZoneFinder class is a wrapper for the c++ base class for representing an\n\
+object that performs DNS lookups in a specific zone accessible via a\n\
+data source. In general, different types of data sources (in-memory,\n\
+database-based, etc) define their own derived c++ classes of ZoneFinder,\n\
+implementing ways to retrieve the required data through the common\n\
+interfaces declared in the base class. Each concrete ZoneFinder object\n\
+is therefore (conceptually) associated with a specific zone of one\n\
+specific data source instance.\n\
+\n\
+The origin name and the RR class of the associated zone are available\n\
+via the get_origin() and get_class() methods, respectively.\n\
+\n\
+The most important method of this class is find(), which performs the\n\
+lookup for a given domain and type. See the description of the method\n\
+for details.\n\
+\n\
+It's not clear whether we should request that a zone finder form a\n\
+\"transaction\", that is, whether to ensure the finder is not\n\
+susceptible to changes made by someone else than the creator of the\n\
+finder. If we don't request that, for example, two different lookup\n\
+results for the same name and type can be different if other threads\n\
+or programs make updates to the zone between the lookups. We should\n\
+revisit this point as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneFinder_getOrigin_doc = "\
+get_origin() -> isc.dns.Name\n\
+\n\
+Return the origin name of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_getClass_doc = "\
+get_class() -> isc.dns.RRClass\n\
+\n\
+Return the RR class of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_find_doc = "\
+find(name, type, target=NULL, options=FIND_DEFAULT) -> (code, FindResult)\n\
+\n\
+Search the zone for a given pair of domain name and RR type.\n\
+\n\
+- If the search name belongs under a zone cut, it returns the code of\n\
+ DELEGATION and the NS RRset at the zone cut.\n\
+- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
+ if DNSSEC is requested, the NSEC RRset that proves the non-\n\
+ existence.\n\
+- If there is a matching name but no RRset of the search type, it\n\
+ returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
+ RRset for that name.\n\
+- If there is a CNAME RR of the searched name but there is no RR of\n\
+ the searched type of the name (so this type is different from\n\
+ CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
+ the searched RR type is CNAME, it is considered a successful match,\n\
+ and the code of SUCCESS will be returned.\n\
+- If the search name matches a delegation point of DNAME, it returns\n\
+ the code of DNAME and that DNAME RR.\n\
+- If the target is a list, all RRsets under the domain are inserted\n\
+ there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
+ instead of normal processing. This is intended to handle ANY query.\n\
+ : this behavior is controversial as we discussed in\n\
+ https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html\n\
+ We should revisit the interface before we heavily rely on it. The\n\
+ options parameter specifies customized behavior of the search. Their\n\
+ semantics is as follows:\n\
+ (This feature is disabled at this time)\n\
+- GLUE_OK Allow search under a zone cut. By default the search will\n\
+ stop once it encounters a zone cut. If this option is specified it\n\
+ remembers information about the highest zone cut and continues the\n\
+ search until it finds an exact match for the given name or it\n\
+ detects there is no exact match. If an exact match is found, RRsets\n\
+ for that name are searched just like the normal case; otherwise, if\n\
+ the search has encountered a zone cut, DELEGATION with the\n\
+ information of the highest zone cut will be returned.\n\
+\n\
+This method raises an isc.datasrc.Error exception if there is an internal\n\
+error in the datasource.\n\
+\n\
+Parameters:\n\
+ name The domain name to be searched for.\n\
+ type The RR type to be searched for.\n\
+ target If target is not NULL, insert all RRs under the domain\n\
+ into it.\n\
+ options The search options.\n\
+\n\
+Return Value(s): A tuple of a result code and a FindResult object enclosing\n\
+the search result (see above).\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
new file mode 100644
index 0000000..598d300
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -0,0 +1,248 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "finder_python.h"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// This is the shared code for the find() call in the finder and the updater
+// It is intentionally not available through any header, nor at our standard
+// namespace, as it is not supposed to be called anywhere but from finder and
+// updater
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
+ if (finder == NULL) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Internal error in find() wrapper; finder object NULL");
+ return (NULL);
+ }
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ finder->find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+ } else {
+ return (Py_BuildValue("IO", r, Py_None));
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+} // end namespace isc_datasrc_internal
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneFinder : public PyObject {
+public:
+ s_ZoneFinder() : cppobj(ZoneFinderPtr()) {};
+ ZoneFinderPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
+
+// General creation and destruction
+int
+ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneFinder cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneFinder_destroy(s_ZoneFinder* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneFinder_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_find(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneFinder_methods[] = {
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneFinder_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneFinder_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneFinder_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+PyTypeObject zonefinder_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneFinder",
+ sizeof(s_ZoneFinder), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneFinder_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneFinder_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source) {
+ s_ZoneFinder* py_zi = static_cast<s_ZoneFinder*>(
+ zonefinder_type.tp_alloc(&zonefinder_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
new file mode 100644
index 0000000..5f2404e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -0,0 +1,36 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_FINDER_H
+#define __PYTHON_DATASRC_FINDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+
+namespace python {
+
+extern PyTypeObject zonefinder_type;
+
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
new file mode 100644
index 0000000..b1d9d25
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -0,0 +1,34 @@
+namespace {
+
+const char* const ZoneIterator_doc = "\
+Read-only iterator to a zone.\n\
+\n\
+You can get an instance of the ZoneIterator from\n\
+DataSourceClient.get_iterator() method. The actual concrete\n\
+c++ implementation will be different depending on the actual data source\n\
+used. This is the abstract interface.\n\
+\n\
+There's no way to start iterating from the beginning again or return.\n\
+\n\
+The ZoneIterator is a python iterator, and can be iterated over directly.\n\
+";
+
+const char* const ZoneIterator_getNextRRset_doc = "\
+get_next_rrset() -> isc.dns.RRset\n\
+\n\
+Get next RRset from the zone.\n\
+\n\
+This returns the next RRset in the zone.\n\
+\n\
+Any special order is not guaranteed.\n\
+\n\
+While this can potentially throw anything (including standard\n\
+allocation errors), it should be rare.\n\
+\n\
+Pointer to the next RRset or None pointer when the iteration gets to\n\
+the end of the zone.\n\
+\n\
+Raises an isc.datasrc.Error exception if it is called again after returning\n\
+None\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
new file mode 100644
index 0000000..b482ea6
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -0,0 +1,202 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "iterator_python.h"
+
+#include "iterator_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneIterator : public PyObject {
+public:
+ s_ZoneIterator() : cppobj(ZoneIteratorPtr()) {};
+ ZoneIteratorPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneIterator, ZoneIterator>
+ ZoneIteratorContainer;
+
+// General creation and destruction
+int
+ZoneIterator_init(s_ZoneIterator* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneIterator cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneIterator_destroy(s_ZoneIterator* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneIterator_getNextRRset(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ if (!self->cppobj) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "get_next_rrset() called past end of iterator");
+ return (NULL);
+ }
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextRRset();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown when we call getNextRRset() when we are
+ // already done iterating ('iterating past end')
+ // We could also simply return None again
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneIterator_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneIterator_next(PyObject* self) {
+ PyObject *result = ZoneIterator_getNextRRset(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneIterator_methods[] = {
+ { "get_next_rrset",
+ reinterpret_cast<PyCFunction>(ZoneIterator_getNextRRset), METH_NOARGS,
+ ZoneIterator_getNextRRset_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneiterator_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneIterator",
+ sizeof(s_ZoneIterator), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneIterator_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneIterator_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneIterator_iter, // tp_iter
+ ZoneIterator_next, // tp_iternext
+ ZoneIterator_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneIterator_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source) {
+ s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
+ zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
new file mode 100644
index 0000000..b457740
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -0,0 +1,38 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_ITERATOR_H
+#define __PYTHON_DATASRC_ITERATOR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject zoneiterator_type;
+
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index a77645a..fd63741 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -33,44 +33,63 @@ def create(cur):
Arguments:
cur - sqlite3 cursor.
"""
- cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
- cur.execute("INSERT INTO schema_version VALUES (1)")
- cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
- name STRING NOT NULL COLLATE NOCASE,
- rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
- dnssec BOOLEAN NOT NULL DEFAULT 0)""")
- cur.execute("CREATE INDEX zones_byname ON zones (name)")
- cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rname STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- sigtype STRING COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX records_byname ON records (name)")
- cur.execute("CREATE INDEX records_byrname ON records (rname)")
- cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
- zone_id INTEGER NOT NULL,
- hash STRING NOT NULL COLLATE NOCASE,
- owner STRING NOT NULL COLLATE NOCASE,
- ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- rdata STRING NOT NULL)""")
- cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
-
-def open(dbfile):
+ # We are creating the database because it apparently had not been at
+ # the time we tried to read from it. However, another process may have
+ # had the same idea, resulting in a potential race condition.
+ # Therefore, we obtain an exclusive lock before we create anything
+ # When we have it, we check *again* whether the database has been
+ # initialized. If not, we do so.
+
+ # If the database is perpetually locked, it'll time out automatically
+ # and we just let it fail.
+ cur.execute("BEGIN EXCLUSIVE TRANSACTION")
+ try:
+ cur.execute("SELECT version FROM schema_version")
+ row = cur.fetchone()
+ except sqlite3.OperationalError:
+ cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
+ cur.execute("INSERT INTO schema_version VALUES (1)")
+ cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
+ name STRING NOT NULL COLLATE NOCASE,
+ rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+ dnssec BOOLEAN NOT NULL DEFAULT 0)""")
+ cur.execute("CREATE INDEX zones_byname ON zones (name)")
+ cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ name STRING NOT NULL COLLATE NOCASE,
+ rname STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ sigtype STRING COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX records_byname ON records (name)")
+ cur.execute("CREATE INDEX records_byrname ON records (rname)")
+ cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
+ zone_id INTEGER NOT NULL,
+ hash STRING NOT NULL COLLATE NOCASE,
+ owner STRING NOT NULL COLLATE NOCASE,
+ ttl INTEGER NOT NULL,
+ rdtype STRING NOT NULL COLLATE NOCASE,
+ rdata STRING NOT NULL)""")
+ cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+ row = [1]
+ cur.execute("COMMIT TRANSACTION")
+ return row
+
+def open(dbfile, connect_timeout=5.0):
""" Open a database, if the database is not yet set up, call create
to do so. It may raise Sqlite3DSError if failed to open sqlite3
database file or find bad database schema version in the database.
Arguments:
dbfile - the filename for the sqlite3 database.
+ connect_timeout - timeout for opening the database or acquiring locks
+ defaults to sqlite3 module's default of 5.0 seconds
Return sqlite3 connection, sqlite3 cursor.
"""
try:
- conn = sqlite3.connect(dbfile)
+ conn = sqlite3.connect(dbfile, timeout=connect_timeout)
cur = conn.cursor()
except Exception as e:
fail = "Failed to open " + dbfile + ": " + e.args[0]
@@ -80,10 +99,13 @@ def open(dbfile):
try:
cur.execute("SELECT version FROM schema_version")
row = cur.fetchone()
- except:
- create(cur)
- conn.commit()
- row = [1]
+ except sqlite3.OperationalError:
+ # temporarily disable automatic transactions so
+ # we can do our own
+ iso_lvl = conn.isolation_level
+ conn.isolation_level = None
+ row = create(cur)
+ conn.isolation_level = iso_lvl
if row == None or row[0] != 1:
raise Sqlite3DSError("Bad database schema version")
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 6f6d157..be30dfa 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,16 +1,18 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = master_test.py sqlite3_ds_test.py
+# old tests, TODO remove or change to use new API?
+#PYTESTS = master_test.py sqlite3_ds_test.py
+PYTESTS = datasrc_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
-CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
+CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +25,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
TESTDATA_WRITE_PATH=$(abs_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
new file mode 100644
index 0000000..15ceb80
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import isc.datasrc
+import isc.dns
+import unittest
+import os
+import shutil
+
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
+
+def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
+ rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ if rdatas is not None:
+ for rdata in rdatas:
+ rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
+ rrset_list.append(rrset_to_add)
+
+# helper function, we have no direct rrset comparison atm
+def rrsets_equal(a, b):
+ # no accessor for sigs either (so this only checks name, class, type, ttl,
+ # and rdata)
+ # also, because of the fake data in rrsigs, if the type is rrsig, the
+ # rdata is not checked
+ return a.get_name() == b.get_name() and\
+ a.get_class() == b.get_class() and\
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and\
+ (a.get_type() == isc.dns.RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# returns true if rrset is in expected_rrsets
+# will remove the rrset from expected_rrsets if found
+def check_for_rrset(expected_rrsets, rrset):
+ for cur_rrset in expected_rrsets[:]:
+ if rrsets_equal(cur_rrset, rrset):
+ expected_rrsets.remove(cur_rrset)
+ return True
+ return False
+
+class DataSrcClient(unittest.TestCase):
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+
+
+ def test_iterate(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ # for RRSIGS, the TTL's are currently modified. This test should
+ # start failing when we fix that.
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+
+ # we do not know the order in which they are returned by the iterator
+ # but we do want to check them, so we put all records into one list
+ # sort it (doesn't matter which way it is sorted, as long as it is
+ # sorted)
+
+ # RRset is (atm) an unorderable type, and within an rrset, the
+ # rdatas and rrsigs may also be in random order. In theory the
+ # rrsets themselves can be returned in any order.
+ #
+ # So we create a second list with all rrsets we expect, and for each
+ # rrset we get from the iterator, see if it is in that list, and
+ # remove it.
+ #
+ # When the iterator is empty, we check no rrsets are left in the
+ # list of expected ones
+ expected_rrset_list = []
+
+ name = isc.dns.Name("sql1.example.com")
+ rrclass = isc.dns.RRClass.IN()
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
+ "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
+ "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
+ "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
+ "5fs0dE/xLztL/CzZ",
+ "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
+ "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
+ "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
+ "taX3lRRPLYWpxRcDPEjysXT3Lh0vfL5D+CIO1yKw/q7C+v6+/kYAxc2l"+
+ "fbNE3HpklSuF+dyX4nXxWgzbcFuLz5Bwfq6ZJ9RYe/kNkA0uMWNa1KkG"+
+ "eRh8gg22kgD/KT5hPTnpezUWLvoY5Qc7IB3T0y4n2JIwiF2ZrZYVrWgD"+
+ "jRWAzGsxJiJyjd6w2k0="
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns01.example.com.",
+ "dns02.example.com.",
+ "dns03.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
+ ])
+ # For RRSIGS, we can't add the fake data through the API, so we
+ # simply pass no rdata at all (which is skipped by the check later)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ [
+ "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ [
+ "192.0.2.100"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "sql1.example.com. A RRSIG NSEC"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+
+ # rrs is an iterator, but also has direct get_next_rrset(), use
+ # the latter one here
+ rrset_to_check = rrs.get_next_rrset()
+ while (rrset_to_check != None):
+ self.assertTrue(check_for_rrset(expected_rrset_list,
+ rrset_to_check),
+ "Unexpected rrset returned by iterator:\n" +
+ rrset_to_check.to_text())
+ rrset_to_check = rrs.get_next_rrset()
+
+ # Now check there are none left
+ self.assertEqual(0, len(expected_rrset_list),
+ "RRset(s) not returned by iterator: " +
+ str([rrset.to_text() for rrset in expected_rrset_list ]
+ ))
+
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"))
+ # there are more than 80 RRs in this zone... let's just count them
+ # (already did a full check of the smaller zone above)
+ self.assertEqual(55, len(list(rrets)))
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
+
+ def test_find(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.DELEGATION, result)
+ self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns02.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns03.example.com.\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXRRSET, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.CNAME, result)
+ self.assertEqual(
+ "cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
+ rrset.to_text())
+
+ self.assertRaises(TypeError, finder.find,
+ "foo",
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ "foo",
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ "foo")
+
+
+class DataSrcUpdater(unittest.TestCase):
+
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+
+ def test_update_delete_commit(self):
+
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset;
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ updater.commit()
+ # second commit should raise exception
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ # the record should be gone now in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # now add it again
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.add_rrset(rrset_to_delete)
+ updater.commit()
+
+ # second commit should throw
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ def test_update_delete_abort(self):
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset;
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # destroy the updater, which should make it roll back
+ updater = None
+
+ # the record should still be available in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 707994f..10c61cf 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -23,8 +23,9 @@ TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
def example_reader():
my_zone = [
@@ -91,5 +92,52 @@ class TestSqlite3_ds(unittest.TestCase):
# and make sure lock does not stay
sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+class NewDBFile(unittest.TestCase):
+ def tearDown(self):
+ # remove the created database after every test
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def setUp(self):
+ # remove the created database before every test too, just
+ # in case a test got aborted half-way, and cleanup didn't occur
+ if (os.path.exists(NEW_DB_FILE)):
+ os.remove(NEW_DB_FILE)
+
+ def test_new_db(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ sqlite3_ds.open(NEW_DB_FILE)
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ def test_new_db_locked(self):
+ self.assertFalse(os.path.exists(NEW_DB_FILE))
+ con = sqlite3.connect(NEW_DB_FILE);
+ con.isolation_level = None
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+
+ # load should now fail, since the database is locked,
+ # and the open() call needs an exclusive lock
+ self.assertRaises(sqlite3.OperationalError,
+ sqlite3_ds.open, NEW_DB_FILE, 0.1)
+
+ con.rollback()
+ cur.close()
+ con.close()
+ self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+ # now that we closed our connection, load should work again
+ sqlite3_ds.open(NEW_DB_FILE)
+
+ # the database should now have been created, and a new load should
+ # not require an exclusive lock anymore, so we lock it again
+ con = sqlite3.connect(NEW_DB_FILE);
+ cur = con.cursor()
+ cur.execute("BEGIN IMMEDIATE TRANSACTION")
+ sqlite3_ds.open(NEW_DB_FILE, 0.1)
+ con.rollback()
+ cur.close()
+ con.close()
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
new file mode 100644
index 0000000..32715ec
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -0,0 +1,181 @@
+namespace {
+
+const char* const ZoneUpdater_doc = "\
+The base class to make updates to a single zone.\n\
+\n\
+On construction, each derived class object will start a\n\
+\"transaction\" for making updates to a specific zone (this means a\n\
+constructor of a derived class would normally take parameters to\n\
+identify the zone to be updated). The underlying realization of a\n\
+\"transaction\" will differ for different derived classes; if it uses\n\
+a general purpose database as a backend, it will involve performing\n\
+some form of \"begin transaction\" statement for the database.\n\
+\n\
+Updates (adding or deleting RRs) are made via add_rrset() and\n\
+delete_rrset() methods. Until the commit() method is called the\n\
+changes are local to the updater object. For example, they won't be\n\
+visible via a ZoneFinder object, but only by the updater's own find()\n\
+method. The commit() completes the transaction and makes the changes\n\
+visible to others.\n\
+\n\
+This class does not provide an explicit \"rollback\" interface. If\n\
+something wrong or unexpected happens during the updates and the\n\
+caller wants to cancel the intermediate updates, the caller should\n\
+simply destroy the updater object without calling commit(). The\n\
+destructor is supposed to perform the \"rollback\" operation,\n\
+depending on the internal details of the derived class.\n\
+\n\
+This initial implementation provides a quite simple interface of\n\
+adding and deleting RRs (see the description of the related methods).\n\
+It may be revisited as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneUpdater_addRRset_doc = "\
+add_rrset(rrset) -> No return value\n\
+\n\
+Add an RRset to a zone via the updater.\n\
+It performs a few basic checks:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG, i.e., whether\n\
+ get_rrsig() on the RRset returns a NULL pointer.\n\
+\n\
+and otherwise does not check any oddity. For example, it doesn't check\n\
+whether the owner name of the specified RRset is a subdomain of the\n\
+zone's origin; it doesn't care whether or not there is already an\n\
+RRset of the same name and RR type in the zone, and if there is,\n\
+whether any of the existing RRs have duplicate RDATA with the added\n\
+ones. If these conditions matter the calling application must examine\n\
+the existing data beforehand using the ZoneFinder returned by\n\
+get_finder().\n\
+\n\
+The validation requirement on the associated RRSIG is temporary. If we\n\
+find it more reasonable and useful to allow adding a pair of RRset and\n\
+its RRSIG RRset as we gain experiences with the interface, we may\n\
+remove this restriction. Until then we explicitly check it to prevent\n\
+accidental misuse.\n\
+\n\
+Conceptually, on successful call to this method, the zone will have\n\
+the specified RRset, and if there is already an RRset of the same name\n\
+and RR type, these two sets will be \"merged\". \"Merged\" means that\n\
+a subsequent call to ZoneFinder.find() for the name and type will\n\
+result in success and the returned RRset will contain all previously\n\
+existing and newly added RDATAs with the TTL being the minimum of the\n\
+two RRsets. The underlying representation of the \"merged\" RRsets may\n\
+vary depending on the characteristic of the underlying data source.\n\
+For example, if it uses a general purpose database that stores each RR\n\
+of the same RRset separately, it may simply be a larger sets of RRs\n\
+based on both the existing and added RRsets; the TTLs of the RRs may\n\
+be different within the database, and there may even be duplicate RRs\n\
+in different database rows. As long as the RRset returned via\n\
+ZoneFinder.find() conforms to the concept of \"merge\", the actual\n\
+internal representation is up to the implementation.\n\
+\n\
+This method must not be called once commit() is performed. If it calls\n\
+after commit() the implementation must throw a isc.datasrc.Error\n\
+exception.\n\
+\n\
+Todo As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if there is already a\n\
+ duplicate RR (that has the same RDATA).\n\
+- we may want to check (and maybe reject) if there is already an RRset\n\
+ of the same name and RR type with different TTL\n\
+- we may even want to check if there is already any RRset of the same\n\
+ name and RR type.\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's a duplicate, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error, or wrapper error\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be added\n\
+\n\
+";
+
+const char* const ZoneUpdater_deleteRRset_doc = "\
+delete_rrset(rrset) -> No return value\n\
+\n\
+Delete an RRset from a zone via the updater.\n\
+\n\
+Like add_rrset(), the detailed semantics and behavior of this method\n\
+may have to be revisited in a future version. The following are based\n\
+on the initial implementation decisions.\n\
+\n\
+- Existing RRs that don't match any of the specified RDATAs will\n\
+ remain in the zone.\n\
+- Any RRs of the specified RRset that doesn't exist in the zone will\n\
+ simply be ignored; the implementation of this method is not supposed\n\
+ to check that condition.\n\
+- The TTL of the RRset is ignored; matching is only performed by the\n\
+ owner name, RR type and RDATA\n\
+\n\
+Ignoring the TTL may not look sensible, but it's based on the\n\
+observation that it will result in more intuitive result, especially\n\
+when the underlying data source is a general purpose database. See\n\
+also the c++ documentation of DatabaseAccessor::DeleteRecordInZone()\n\
+on this point. It also matches the dynamic update protocol (RFC2136),\n\
+where TTLs are ignored when deleting RRs.\n\
+\n\
+This method performs a limited level of validation on the specified\n\
+RRset:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG\n\
+\n\
+This method must not be called once commit() is performed. If it calls\n\
+after commit() the implementation must throw a isc.datasrc.Error\n\
+exception.\n\
+\n\
+Todo: As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if some or all of the RRs\n\
+ for the specified RRset don't exist in the zone\n\
+- we may want to allow an option to \"delete everything\" for\n\
+ specified name and/or specified name + RR type.\n\
+- as mentioned above, we may want to include the TTL in matching the\n\
+ deleted RRs\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's any RRs that are specified but don't\n\
+ exit, the number of actually deleted RRs, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error\n\
+ std.bad_alloc Resource allocation failure\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be deleted\n\
+\n\
+";
+
+const char* const ZoneUpdater_commit_doc = "\
+commit() -> void\n\
+\n\
+Commit the updates made in the updater to the zone.\n\
+\n\
+This method completes the \"transaction\" started at the creation of\n\
+the updater. After successful completion of this method, the updates\n\
+will be visible outside the scope of the updater. The actual internal\n\
+behavior will defer for different derived classes. For a derived class\n\
+with a general purpose database as a backend, for example, this method\n\
+would perform a \"commit\" statement for the database.\n\
+\n\
+This operation can only be performed at most once. A duplicate call\n\
+must result in a isc.datasrc.Error exception.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Duplicate call of the method, internal data source\n\
+ error, or wrapper error\n\\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
new file mode 100644
index 0000000..a9dc581
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -0,0 +1,318 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+
+#include "datasrc.h"
+#include "updater_python.h"
+
+#include "updater_inc.cc"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// See finder_python.cc
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+}
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneUpdater : public PyObject {
+public:
+ s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()) {};
+ ZoneUpdaterPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int
+ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneUpdater cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneUpdater_addRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->addRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_deleteRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->deleteRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_commit(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ self->cppobj->commit();
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getFinder().getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getFinder().getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(&self->cppobj->getFinder(),
+ args));
+}
+
+PyObject*
+AZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ self->cppobj->getFinder().find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return Py_BuildValue("IN", r, createRRsetObject(*rrsp));
+ } else {
+ return Py_BuildValue("IO", r, Py_None);
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneUpdater_methods[] = {
+ { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+ METH_VARARGS, ZoneUpdater_addRRset_doc },
+ { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+ METH_VARARGS, ZoneUpdater_deleteRRset_doc },
+ { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
+ ZoneUpdater_commit_doc },
+ // Instead of a getFinder, we implement the finder functionality directly
+ // This is because ZoneFinder is non-copyable, and we should not create
+ // a ZoneFinder object from a reference only (which is what is returned
+ // by getFinder(). Apart from that
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneupdater_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneUpdater",
+ sizeof(s_ZoneUpdater), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneUpdater_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneUpdater_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source) {
+ s_ZoneUpdater* py_zi = static_cast<s_ZoneUpdater*>(
+ zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
new file mode 100644
index 0000000..3886aa3
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -0,0 +1,39 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_UPDATER_H
+#define __PYTHON_DATASRC_UPDATER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+
+extern PyTypeObject zoneupdater_type;
+
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_UPDATER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
new file mode 100644
index 0000000..161c2a5
--- /dev/null
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -0,0 +1,7 @@
+python_PYTHON = __init__.py
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/log/Makefile.am b/src/lib/python/isc/log/Makefile.am
index b228caf..5ff2c28 100644
--- a/src/lib/python/isc/log/Makefile.am
+++ b/src/lib/python/isc/log/Makefile.am
@@ -23,6 +23,15 @@ log_la_LIBADD += $(PYTHON_LIB)
# This is not installed, it helps locate the module during tests
EXTRA_DIST = __init__.py
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a near future version.
+install-data-local:
+ if test -d @pyexecdir@/isc/log; then \
+ echo "@pyexecdir@/isc/log is deprecated, and will confuse newer versions. Please (re)move it by hand."; \
+ exit 1; \
+ fi
+
pytest:
$(SHELL) tests/log_test
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index 484151f..5bb6a94 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -20,6 +20,7 @@
#include <log/message_dictionary.h>
#include <log/logger_manager.h>
+#include <log/logger_support.h>
#include <log/logger.h>
#include <config/ccsession.h>
@@ -35,7 +36,7 @@ using boost::bind;
// (tags/RELEASE_28 115909)) on OSX, where unwinding the stack
// segfaults the moment this exception was thrown and caught.
//
-// Placing it in a named namespace instead of the original
+// Placing it in a named namespace instead of the original
// unnamed namespace appears to solve this, so as a temporary
// workaround, we create a local randomly named namespace here
// to solve this issue.
@@ -184,6 +185,27 @@ init(PyObject*, PyObject* args) {
Py_RETURN_NONE;
}
+// This initialization is for unit tests. It allows message settings to
+// be determined by a set of B10_xxx environment variables. (See the
+// description of initLogger() for more details.) The function has been named
+// resetUnitTestRootLogger() here as being more descriptive and
+// trying to avoid confusion.
+PyObject*
+resetUnitTestRootLogger(PyObject*, PyObject*) {
+ try {
+ isc::log::resetUnitTestRootLogger();
+ }
+ catch (const std::exception& e) {
+ PyErr_SetString(PyExc_RuntimeError, e.what());
+ return (NULL);
+ }
+ catch (...) {
+ PyErr_SetString(PyExc_RuntimeError, "Unknown C++ exception");
+ return (NULL);
+ }
+ Py_RETURN_NONE;
+}
+
PyObject*
logConfigUpdate(PyObject*, PyObject* args) {
// we have no wrappers for ElementPtr and ConfigData,
@@ -246,6 +268,12 @@ PyMethodDef methods[] = {
"logging severity (one of 'DEBUG', 'INFO', 'WARN', 'ERROR' or "
"'FATAL'), a debug level (integer in the range 0-99) and a file name "
"of a dictionary with message text translations."},
+ {"resetUnitTestRootLogger", resetUnitTestRootLogger, METH_VARARGS,
+ "Resets the configuration of the root logger to that set by the "
+ "B10_XXX environment variables. It is aimed at unit tests, where "
+ "the logging is initialized by the code under test; called before "
+ "the unit test starts, this function resets the logging configuration "
+ "to that in use for the C++ unit tests."},
{"log_config_update", logConfigUpdate, METH_VARARGS,
"Update logger settings. This method is automatically used when "
"ModuleCCSession is initialized with handle_logging_config set "
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 6bb67de..170eee6 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -1,28 +1,40 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = log_test.py
-EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
+PYTESTS_GEN = log_console.py
+PYTESTS_NOGEN = log_test.py
+noinst_SCRIPTS = $(PYTESTS_GEN)
+EXTRA_DIST = console.out check_output.sh $(PYTESTS_NOGEN)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We need to run the cycle twice, because the files are once in the builddir and once in the srcdir
check-local:
+ chmod +x $(abs_builddir)/log_console.py
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
- for pytest in $(PYTESTS) ; do \
+ for pytest in $(PYTESTS_NOGEN) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done ; \
+ for pytest in $(PYTESTS_GEN) ; do \
+ echo Running test: $$pytest ; \
+ chmod +x $(abs_builddir)/$$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+ B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+ $(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..30f8374
--- /dev/null
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -0,0 +1,32 @@
+SUBDIRS = work
+
+EXTRA_DIST = __init__.py
+EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += cmdctl_messages.py
+EXTRA_DIST += stats_messages.py
+EXTRA_DIST += stats_httpd_messages.py
+EXTRA_DIST += xfrin_messages.py
+EXTRA_DIST += xfrout_messages.py
+EXTRA_DIST += zonemgr_messages.py
+EXTRA_DIST += cfgmgr_messages.py
+EXTRA_DIST += config_messages.py
+EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libxfrin_messages.py
+
+CLEANFILES = __init__.pyc
+CLEANFILES += bind10_messages.pyc
+CLEANFILES += cmdctl_messages.pyc
+CLEANFILES += stats_messages.pyc
+CLEANFILES += stats_httpd_messages.pyc
+CLEANFILES += xfrin_messages.pyc
+CLEANFILES += xfrout_messages.pyc
+CLEANFILES += zonemgr_messages.pyc
+CLEANFILES += cfgmgr_messages.pyc
+CLEANFILES += config_messages.pyc
+CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libxfrin_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/README b/src/lib/python/isc/log_messages/README
new file mode 100644
index 0000000..c96f78c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/README
@@ -0,0 +1,68 @@
+This is a placeholder package for logging messages of various modules
+in the form of python scripts. This package is expected to be installed
+somewhere like <top-install-dir>/python3.x/site-packages/isc/log_messages
+and each message script is expected to be imported as
+"isc.log_messages.some_module_messages".
+
+We also need to allow in-source test code to get access to the message
+scripts in the same manner. That's why the package is stored in the
+directory that shares the same trailing part as the install directory,
+i.e., isc/log_messages.
+
+Furthermore, we need to support a build mode using a separate build
+tree (such as in the case with 'make distcheck'). In that case if an
+application (via a test script) imports "isc.log_messages.xxx", it
+would try to import the module under the source tree, where the
+generated message script doesn't exist. So, in the source directory
+(i.e., here) we provide dummy scripts that subsequently import the
+same name of module under the "work" sub-package. The caller
+application is assumed to have <top_builddir>/src/lib/python/isc/log_messages
+in its module search path (this is done by including
+$(COMMON_PYTHON_PATH) in the PYTHONPATH environment variable),
+which ensures the right directory is chosen.
+
+A python module or program that defines its own log messages needs to
+make sure that the setup described above is implemented. It's a
+complicated process, but can generally be done by following a common
+pattern:
+
+1. Create the dummy script (see above) for the module and update
+ Makefile.am in this directory accordingly. See (and use)
+ a helper shell script named gen-forwarder.sh.
+2. Update Makefile.am of the module that defines the log message. The
+ following is a sample snippet for Makefile.am for a module named
+ "mymodule" (which is supposed to be generated from a file
+ "mymodule_messages.mes"). In many cases it should work simply by
+ replacing 'mymodule' with the actual module name.
+
+==================== begin Makefile.am additions ===================
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.pyc
+
+EXTRA_DIST = mymodule_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py : mymodule_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/mymodule_messages.mes
+
+# This rule ensures mymodule_messages.py is (re)generated as a result of
+# 'make'. If there's no other appropriate target, specify
+# mymodule_messages.py in BUILT_SOURCES.
+mymodule: <other source files> $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+===================== end Makefile.am additions ====================
+
+Notes:
+- "nodist_" prefix is important. Without this, 'make distcheck' tries
+ to make _messages.py before actually starting the main build, which
+ would fail because the message compiler isn't built yet.
+- "pylogmessage" is a prefix for python scripts that define log
+ messages and are expected to be installed in the common isc/log_messages
+ directory. It's intentionally named differently from the common
+ "python" prefix (as in python_PYTHON), because the latter may be
+ used for other scripts in the same Makefile.am file.
+- $(PYTHON_LOGMSGPKG_DIR) should be set to point to this directory (or
+ the corresponding build directory if it's different) by the
+ configure script.
diff --git a/src/lib/python/isc/log_messages/__init__.py b/src/lib/python/isc/log_messages/__init__.py
new file mode 100644
index 0000000..d222b8c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/__init__.py
@@ -0,0 +1,3 @@
+"""
+This is an in-source forwarder package redirecting to work/* scripts.
+"""
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
new file mode 100644
index 0000000..68ce94c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/bind10_messages.py
@@ -0,0 +1 @@
+from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/cfgmgr_messages.py b/src/lib/python/isc/log_messages/cfgmgr_messages.py
new file mode 100644
index 0000000..5557100
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cfgmgr_messages.py
@@ -0,0 +1 @@
+from work.cfgmgr_messages import *
diff --git a/src/lib/python/isc/log_messages/cmdctl_messages.py b/src/lib/python/isc/log_messages/cmdctl_messages.py
new file mode 100644
index 0000000..7283d5a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cmdctl_messages.py
@@ -0,0 +1 @@
+from work.cmdctl_messages import *
diff --git a/src/lib/python/isc/log_messages/config_messages.py b/src/lib/python/isc/log_messages/config_messages.py
new file mode 100644
index 0000000..c557975
--- /dev/null
+++ b/src/lib/python/isc/log_messages/config_messages.py
@@ -0,0 +1 @@
+from work.config_messages import *
diff --git a/src/lib/python/isc/log_messages/gen-forwarder.sh b/src/lib/python/isc/log_messages/gen-forwarder.sh
new file mode 100755
index 0000000..84c2450
--- /dev/null
+++ b/src/lib/python/isc/log_messages/gen-forwarder.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+MODULE_NAME=$1
+if test -z $MODULE_NAME; then
+ echo 'Usage: gen-forwarder.sh module_name'
+ exit 1
+fi
+
+echo "from work.${MODULE_NAME}_messages import *" > ${MODULE_NAME}_messages.py
+echo "Forwarder python script is generated. Make sure to perform:"
+echo "git add ${MODULE_NAME}_messages.py"
+echo "and add the following to Makefile.am:"
+echo "EXTRA_DIST += ${MODULE_NAME}_messages.py"
+echo "CLEANFILES += ${MODULE_NAME}_messages.pyc"
diff --git a/src/lib/python/isc/log_messages/libxfrin_messages.py b/src/lib/python/isc/log_messages/libxfrin_messages.py
new file mode 100644
index 0000000..74da329
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libxfrin_messages.py
@@ -0,0 +1 @@
+from work.libxfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/notify_out_messages.py b/src/lib/python/isc/log_messages/notify_out_messages.py
new file mode 100644
index 0000000..6aa37ea
--- /dev/null
+++ b/src/lib/python/isc/log_messages/notify_out_messages.py
@@ -0,0 +1 @@
+from work.notify_out_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_httpd_messages.py b/src/lib/python/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..7782c34
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1 @@
+from work.stats_httpd_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_messages.py b/src/lib/python/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..1324cfc
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_messages.py
@@ -0,0 +1 @@
+from work.stats_messages import *
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
new file mode 100644
index 0000000..9bc5e0f
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -0,0 +1,12 @@
+# .py is generated in the builddir by the configure script so that test
+# scripts can refer to it when a separate builddir is used.
+
+python_PYTHON = __init__.py
+
+pythondir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = __init__.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/work/__init__.py.in b/src/lib/python/isc/log_messages/work/__init__.py.in
new file mode 100644
index 0000000..991f10a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/__init__.py.in
@@ -0,0 +1,3 @@
+"""
+This package is a placeholder for python scripts of log messages.
+"""
diff --git a/src/lib/python/isc/log_messages/xfrin_messages.py b/src/lib/python/isc/log_messages/xfrin_messages.py
new file mode 100644
index 0000000..b412519
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrin_messages.py
@@ -0,0 +1 @@
+from work.xfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/xfrout_messages.py b/src/lib/python/isc/log_messages/xfrout_messages.py
new file mode 100644
index 0000000..2093d5c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrout_messages.py
@@ -0,0 +1 @@
+from work.xfrout_messages import *
diff --git a/src/lib/python/isc/log_messages/zonemgr_messages.py b/src/lib/python/isc/log_messages/zonemgr_messages.py
new file mode 100644
index 0000000..b3afe9c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/zonemgr_messages.py
@@ -0,0 +1 @@
+from work.zonemgr_messages import *
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 3a04f17..dd94946 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/Makefile.am b/src/lib/python/isc/notify/Makefile.am
index 4081a17..c247ab8 100644
--- a/src/lib/python/isc/notify/Makefile.am
+++ b/src/lib/python/isc/notify/Makefile.am
@@ -1,10 +1,22 @@
SUBDIRS = . tests
python_PYTHON = __init__.py notify_out.py
-
pythondir = $(pyexecdir)/isc/notify
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = notify_out_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.pyc
+
CLEANDIRS = __pycache__
+$(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py : notify_out_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/notify_out_messages.mes
+
clean-local:
rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 4b25463..6b91c87 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -23,11 +23,15 @@ import errno
from isc.datasrc import sqlite3_ds
from isc.net import addr
import isc
-try:
- from pydnspp import *
-except ImportError as e:
- # C++ loadable module may not be installed;
- sys.stderr.write('[b10-xfrout] failed to import DNS or XFR module: %s\n' % str(e))
+from isc.log_messages.notify_out_messages import *
+
+logger = isc.log.Logger("notify_out")
+
+# there used to be a printed message if this import failed, but if
+# we can't import we should not start anyway, and logging an error
+# is a bad idea since the logging system is most likely not
+# initialized yet. see trac ticket #1103
+from pydnspp import *
ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
_MAX_NOTIFY_NUM = 30
@@ -46,9 +50,6 @@ _BAD_QR = 4
_BAD_REPLY_PACKET = 5
SOCK_DATA = b's'
-def addr_to_str(addr):
- return '%s#%s' % (addr[0], addr[1])
-
class ZoneNotifyInfo:
'''This class keeps track of notify-out information for one zone.'''
@@ -105,11 +106,10 @@ class NotifyOut:
notify message to its slaves). notify service can be started by
calling dispatcher(), and it can be stoped by calling shutdown()
in another thread. '''
- def __init__(self, datasrc_file, log=None, verbose=True):
+ def __init__(self, datasrc_file, verbose=True):
self._notify_infos = {} # key is (zone_name, zone_class)
self._waiting_zones = []
self._notifying_zones = []
- self._log = log
self._serving = False
self._read_sock, self._write_sock = socket.socketpair()
self._read_sock.setblocking(False)
@@ -362,18 +362,19 @@ class NotifyOut:
tgt = zone_notify_info.get_current_notify_target()
if event_type == _EVENT_READ:
reply = self._get_notify_reply(zone_notify_info.get_socket(), tgt)
- if reply:
- if self._handle_notify_reply(zone_notify_info, reply):
+ if reply is not None:
+ if self._handle_notify_reply(zone_notify_info, reply, tgt):
self._notify_next_target(zone_notify_info)
elif event_type == _EVENT_TIMEOUT and zone_notify_info.notify_try_num > 0:
- self._log_msg('info', 'notify retry to %s' % addr_to_str(tgt))
+ logger.info(NOTIFY_OUT_TIMEOUT, tgt[0], tgt[1])
tgt = zone_notify_info.get_current_notify_target()
if tgt:
zone_notify_info.notify_try_num += 1
if zone_notify_info.notify_try_num > _MAX_NOTIFY_TRY_NUM:
- self._log_msg('info', 'notify to %s: retried exceeded' % addr_to_str(tgt))
+ logger.warn(NOTIFY_OUT_RETRY_EXCEEDED, tgt[0], tgt[1],
+ _MAX_NOTIFY_TRY_NUM)
self._notify_next_target(zone_notify_info)
else:
# set exponential backoff according rfc1996 section 3.6
@@ -412,10 +413,15 @@ class NotifyOut:
try:
sock = zone_notify_info.create_socket(addrinfo[0])
sock.sendto(render.get_data(), 0, addrinfo)
- self._log_msg('info', 'sending notify to %s' % addr_to_str(addrinfo))
+ logger.info(NOTIFY_OUT_SENDING_NOTIFY, addrinfo[0],
+ addrinfo[1])
except (socket.error, addr.InvalidAddress) as err:
- self._log_msg('error', 'send notify to %s failed: %s' %
- (addr_to_str(addrinfo), str(err)))
+ logger.error(NOTIFY_OUT_SOCKET_ERROR, addrinfo[0],
+ addrinfo[1], err)
+ return False
+ except addr.InvalidAddress as iae:
+ logger.error(NOTIFY_OUT_INVALID_ADDRESS, addrinfo[0],
+ addrinfo[1], iae)
return False
return True
@@ -446,34 +452,38 @@ class NotifyOut:
msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
return msg, qid
- def _handle_notify_reply(self, zone_notify_info, msg_data):
+ def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
'''Parse the notify reply message.
- TODO, the error message should be refined properly.
rcode will not checked here, If we get the response
from the slave, it means the slaves has got the notify.'''
msg = Message(Message.PARSE)
try:
- errstr = 'notify reply error: '
msg.from_wire(msg_data)
if not msg.get_header_flag(Message.HEADERFLAG_QR):
- self._log_msg('error', errstr + 'bad flags')
+ logger.warn(NOTIFY_OUT_REPLY_QR_NOT_SET, from_addr[0],
+ from_addr[1])
return _BAD_QR
if msg.get_qid() != zone_notify_info.notify_msg_id:
- self._log_msg('error', errstr + 'bad query ID')
+ logger.warn(NOTIFY_OUT_REPLY_BAD_QID, from_addr[0],
+ from_addr[1], msg.get_qid(),
+ zone_notify_info.notify_msg_id)
return _BAD_QUERY_ID
question = msg.get_question()[0]
if question.get_name() != Name(zone_notify_info.zone_name):
- self._log_msg('error', errstr + 'bad query name')
+ logger.warn(NOTIFY_OUT_REPLY_BAD_QUERY_NAME, from_addr[0],
+ from_addr[1], question.get_name().to_text(),
+ Name(zone_notify_info.zone_name).to_text())
return _BAD_QUERY_NAME
if msg.get_opcode() != Opcode.NOTIFY():
- self._log_msg('error', errstr + 'bad opcode')
+ logger.warn(NOTIFY_OUT_REPLY_BAD_OPCODE, from_addr[0],
+ from_addr[1], msg.get_opcode().to_text())
return _BAD_OPCODE
except Exception as err:
# We don't care what exception, just report it?
- self._log_msg('error', errstr + str(err))
+ logger.error(NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION, err)
return _BAD_REPLY_PACKET
return _REPLY_OK
@@ -481,14 +491,9 @@ class NotifyOut:
def _get_notify_reply(self, sock, tgt_addr):
try:
msg, addr = sock.recvfrom(512)
- except socket.error:
- self._log_msg('error', "notify to %s failed: can't read notify reply" % addr_to_str(tgt_addr))
+ except socket.error as err:
+ logger.error(NOTIFY_OUT_SOCKET_RECV_ERROR, tgt_addr[0],
+ tgt_addr[1], err)
return None
return msg
-
-
- def _log_msg(self, level, msg):
- if self._log:
- self._log.log_message(level, msg)
-
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
new file mode 100644
index 0000000..570f51e
--- /dev/null
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -0,0 +1,83 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the notify_out_messages python module.
+
+% NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+
+% NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+
+% NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+
+% NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+
+% NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+
+% NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+
+% NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2
+A notify message is sent to the secondary nameserver at the given
+address.
+
+% NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+
+% NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+
+% NOTIFY_OUT_TIMEOUT retry notify to %1#%2
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+
+% NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 1427d93..00c2eee 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +18,7 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(LIBRARY_PATH_PLACEHOLDER) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 0eb77a3..83f6d1a 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -21,6 +21,7 @@ import time
import socket
from isc.datasrc import sqlite3_ds
from isc.notify import notify_out, SOCK_DATA
+import isc.log
# our fake socket, where we can read and insert messages
class MockSocket():
@@ -79,7 +80,6 @@ class TestZoneNotifyInfo(unittest.TestCase):
self.info.prepare_notify_out()
self.assertEqual(self.info.get_current_notify_target(), ('127.0.0.1', 53))
- self.assertEqual('127.0.0.1#53', notify_out.addr_to_str(('127.0.0.1', 53)))
self.info.set_next_notify_target()
self.assertEqual(self.info.get_current_notify_target(), ('1.1.1.1', 5353))
self.info.set_next_notify_target()
@@ -223,29 +223,30 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual(0, len(self._notify._waiting_zones))
def test_handle_notify_reply(self):
- self.assertEqual(notify_out._BAD_REPLY_PACKET, self._notify._handle_notify_reply(None, b'badmsg'))
+ fake_address = ('192.0.2.1', 53)
+ self.assertEqual(notify_out._BAD_REPLY_PACKET, self._notify._handle_notify_reply(None, b'badmsg', fake_address))
example_com_info = self._notify._notify_infos[('example.com.', 'IN')]
example_com_info.notify_msg_id = 0X2f18
# test with right notify reply message
data = b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._REPLY_OK, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._REPLY_OK, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright query id
data = b'\x2e\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QUERY_ID, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QUERY_ID, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright query name
data = b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03net\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QUERY_NAME, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QUERY_NAME, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright opcode
data = b'\x2f\x18\x80\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_OPCODE, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_OPCODE, self._notify._handle_notify_reply(example_com_info, data, fake_address))
# test with unright qr
data = b'\x2f\x18\x10\x10\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01'
- self.assertEqual(notify_out._BAD_QR, self._notify._handle_notify_reply(example_com_info, data))
+ self.assertEqual(notify_out._BAD_QR, self._notify._handle_notify_reply(example_com_info, data, fake_address))
def test_send_notify_message_udp_ipv4(self):
example_com_info = self._notify._notify_infos[('example.net.', 'IN')]
@@ -300,6 +301,15 @@ class TestNotifyOut(unittest.TestCase):
self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_NONE)
self.assertNotEqual(cur_tgt, example_net_info._notify_current)
+ cur_tgt = example_net_info._notify_current
+ example_net_info.create_socket('127.0.0.1')
+ # dns message, will result in bad_qid, but what we are testing
+ # here is whether handle_notify_reply is called correctly
+ example_net_info._sock.remote_end().send(b'\x2f\x18\xa0\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\03com\x00\x00\x06\x00\x01')
+ self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
+ self.assertNotEqual(cur_tgt, example_net_info._notify_current)
+
+
def _example_net_data_reader(self):
zone_data = [
('example.net.', '1000', 'IN', 'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
@@ -406,6 +416,7 @@ class TestNotifyOut(unittest.TestCase):
self.assertFalse(thread.is_alive())
if __name__== "__main__":
+ isc.log.init("bind10")
unittest.main()
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index c3d35c2..3b882b4 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/lib/python/isc/xfrin/Makefile.am b/src/lib/python/isc/xfrin/Makefile.am
new file mode 100644
index 0000000..5804de6
--- /dev/null
+++ b/src/lib/python/isc/xfrin/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py diff.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libxfrin_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py: libxfrin_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libxfrin_messages.mes
+
+pythondir = $(pyexecdir)/isc/xfrin
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/xfrin/__init__.py b/src/lib/python/isc/xfrin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
new file mode 100644
index 0000000..b6d8244
--- /dev/null
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -0,0 +1,235 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This helps the XFR in process with accumulating parts of diff and applying
+it to the datasource.
+
+The name of the module is not yet fully decided. We might want to move it
+under isc.datasrc or somewhere else, because we might want to reuse it with
+future DDNS process. But until then, it lives here.
+"""
+
+import isc.dns
+import isc.log
+from isc.log_messages.libxfrin_messages import *
+
+class NoSuchZone(Exception):
+ """
+    This is raised if a diff for a non-existent zone is being created.
+ """
+ pass
+
+"""
+This is the amount of changes we accumulate before calling Diff.apply
+automatically.
+
+The number 100 is just taken from BIND 9. We don't know the rationale
+for exactly this amount, but we think it is just some randomly chosen
+number.
+"""
+# If changing this, modify the tests accordingly as well.
+DIFF_APPLY_TRESHOLD = 100
+
+logger = isc.log.Logger('libxfrin')
+
+class Diff:
+ """
+ The class represents a diff against current state of datasource on
+ one zone. The usual way of working with it is creating it, then putting
+    a bunch of changes in and committing at the end.
+
+ If you change your mind, you can just stop using the object without
+    really committing it. In that case no changes will happen in the data
+    source.
+
+ The class works as a kind of a buffer as well, it does not direct
+ the changes to underlying data source right away, but keeps them for
+ a while.
+ """
+ def __init__(self, ds_client, zone):
+ """
+ Initializes the diff to a ready state. It checks the zone exists
+ in the datasource and if not, NoSuchZone is raised. This also creates
+ a transaction in the data source.
+
+ The ds_client is the datasource client containing the zone. Zone is
+ isc.dns.Name object representing the name of the zone (its apex).
+
+ You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
+ exceptions.
+ """
+ self.__updater = ds_client.get_updater(zone, False)
+ if self.__updater is None:
+ # The no such zone case
+ raise NoSuchZone("Zone " + str(zone) +
+ " does not exist in the data source " +
+ str(ds_client))
+ self.__buffer = []
+
+ def __check_commited(self):
+ """
+        This checks if the diff is already committed or broken. If it is, it
+        raises ValueError. This check is for methods that need to work only on
+        yet uncommitted diffs.
+ """
+ if self.__updater is None:
+ raise ValueError("The diff is already commited or it has raised " +
+ "an exception, you come late")
+
+ def __data_common(self, rr, operation):
+ """
+ Schedules an operation with rr.
+
+ It does all the real work of add_data and remove_data, including
+ all checks.
+ """
+ self.__check_commited()
+ if rr.get_rdata_count() != 1:
+ raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
+ 'it holds ' + str(rr.get_rdata_count()))
+ if rr.get_class() != self.__updater.get_class():
+ raise ValueError("The rrset's class " + str(rr.get_class()) +
+ " does not match updater's " +
+ str(self.__updater.get_class()))
+ self.__buffer.append((operation, rr))
+ if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+ # Time to auto-apply, so the data don't accumulate too much
+ self.apply()
+
+ def add_data(self, rr):
+ """
+ Schedules addition of an RR into the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'add')
+
+ def remove_data(self, rr):
+ """
+ Schedules removal of an RR from the zone in this diff.
+
+ The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+ raises the ValueError exception.
+
+ The rr class must match the one of the datasource client. If
+ it does not, ValueError is raised.
+ """
+ self.__data_common(rr, 'remove')
+
+ def compact(self):
+ """
+ Tries to compact the operations in buffer a little by putting some of
+ the operations together, forming RRsets with more than one RR.
+
+ This is called by apply before putting the data into datasource. You
+ may, but not have to, call this manually.
+
+ Currently it merges consecutive same operations on the same
+ domain/type. We could do more fancy things, like sorting by the domain
+ and do more merging, but such diffs should be rare in practice anyway,
+ so we don't bother and do it this simple way.
+ """
+ buf = []
+ for (op, rrset) in self.__buffer:
+ old = buf[-1][1] if len(buf) > 0 else None
+ if old is None or op != buf[-1][0] or \
+ rrset.get_name() != old.get_name() or \
+ rrset.get_type() != old.get_type():
+ buf.append((op, isc.dns.RRset(rrset.get_name(),
+ rrset.get_class(),
+ rrset.get_type(),
+ rrset.get_ttl())))
+ if rrset.get_ttl() != buf[-1][1].get_ttl():
+ logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
+ buf[-1][1].get_ttl())
+ for rdatum in rrset.get_rdata():
+ buf[-1][1].add_rdata(rdatum)
+ self.__buffer = buf
+
+ def apply(self):
+ """
+ Push the buffered changes inside this diff down into the data source.
+ This does not stop you from adding more changes later through this
+ diff and it does not close the datasource transaction, so the changes
+ will not be shown to others yet. It just means the internal memory
+ buffer is flushed.
+
+ This is called from time to time automatically, but you can call it
+ manually if you really want to.
+
+        This raises ValueError if the diff was already committed.
+
+ It also can raise isc.datasrc.Error. If that happens, you should stop
+ using this object and abort the modification.
+ """
+ self.__check_commited()
+ # First, compact the data
+ self.compact()
+ try:
+ # Then pass the data inside the data source
+ for (operation, rrset) in self.__buffer:
+ if operation == 'add':
+ self.__updater.add_rrset(rrset)
+ elif operation == 'remove':
+ self.__updater.remove_rrset(rrset)
+ else:
+ raise ValueError('Unknown operation ' + operation)
+ # As everything is already in, drop the buffer
+ except:
+ # If there's a problem, we can't continue.
+ self.__updater = None
+ raise
+
+ self.__buffer = []
+
+ def commit(self):
+ """
+ Writes all the changes into the data source and makes them visible.
+ This closes the diff, you may not use it any more. If you try to use
+ it, you'll get ValueError.
+
+ This might raise isc.datasrc.Error.
+ """
+ self.__check_commited()
+ # Push the data inside the data source
+ self.apply()
+ # Make sure they are visible.
+ try:
+ self.__updater.commit()
+ finally:
+ # Remove the updater. That will free some resources for one, but
+            # mark this object as already committed, so we can check
+
+ # We remove it even in case the commit failed, as that makes us
+ # unusable.
+ self.__updater = None
+
+ def get_buffer(self):
+ """
+ Returns the current buffer of changes not yet passed into the data
+ source. It is in a form like [('add', rrset), ('remove', rrset),
+ ('remove', rrset), ...].
+
+ Probably useful only for testing and introspection purposes. Don't
+ modify the list.
+ """
+ return self.__buffer
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
new file mode 100644
index 0000000..be943c8
--- /dev/null
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -0,0 +1,21 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libxfrin_messages python module.
+
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten to the earlier one in the sequence.
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
new file mode 100644
index 0000000..416d62b
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -0,0 +1,24 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = diff_tests.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+ touch $(abs_top_srcdir)/.coverage
+ rm -f .coverage
+ ${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+ for pytest in $(PYTESTS) ; do \
+ echo Running test: $$pytest ; \
+ $(LIBRARY_PATH_PLACEHOLDER) \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+ $(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+ done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
new file mode 100644
index 0000000..9652a1a
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -0,0 +1,437 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import unittest
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.xfrin.diff import Diff, NoSuchZone
+
+class TestError(Exception):
+ """
+ Just to have something to be raised during the tests.
+ Not used outside.
+ """
+ pass
+
+class DiffTest(unittest.TestCase):
+ """
+ Tests for the isc.xfrin.diff.Diff class.
+
+ It also plays role of a data source and an updater, so it can manipulate
+ some test variables while being called.
+ """
+ def setUp(self):
+ """
+ This sets internal variables so we can see nothing was called yet.
+
+ It also creates some variables used in multiple tests.
+ """
+ # Track what was called already
+ self.__updater_requested = False
+ self.__compact_called = False
+ self.__data_operations = []
+ self.__apply_called = False
+ self.__commit_called = False
+ self.__broken_called = False
+ self.__warn_called = False
+ # Some common values
+ self.__rrclass = RRClass.IN()
+ self.__type = RRType.A()
+ self.__ttl = RRTTL(3600)
+ # And RRsets
+ # Create two valid rrsets
+ self.__rrset1 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rdata = Rdata(self.__type, self.__rrclass, '192.0.2.1')
+ self.__rrset1.add_rdata(self.__rdata)
+ self.__rrset2 = RRset(Name('b.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset2.add_rdata(self.__rdata)
+ # And two invalid
+ self.__rrset_empty = RRset(Name('empty.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi = RRset(Name('multi.example.org.'), self.__rrclass,
+ self.__type, self.__ttl)
+ self.__rrset_multi.add_rdata(self.__rdata)
+ self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
+ '192.0.2.2'))
+
+ def __mock_compact(self):
+ """
+ This can be put into the diff to hook into its compact method and see
+ if it gets called.
+ """
+ self.__compact_called = True
+
+ def __mock_apply(self):
+ """
+ This can be put into the diff to hook into its apply method and see
+ it gets called.
+ """
+ self.__apply_called = True
+
+ def __broken_operation(self, *args):
+ """
+ This can be used whenever an operation should fail. It raises TestError.
+ It should take whatever amount of parameters needed, so it can be put
+ quite anywhere.
+ """
+ self.__broken_called = True
+ raise TestError("Test error")
+
+ def warn(self, *args):
+ """
+ This is for checking the warn function was called, we replace the logger
+ in the tested module.
+ """
+ self.__warn_called = True
+
+ def commit(self):
+ """
+ This is part of pretending to be a zone updater. This notes the commit
+ was called.
+ """
+ self.__commit_called = True
+
+ def add_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ addition of an rrset was requested.
+ """
+ self.__data_operations.append(('add', rrset))
+
+ def remove_rrset(self, rrset):
+ """
+ This one is part of pretending to be a zone updater. It writes down
+ removal of an rrset was requested.
+ """
+ self.__data_operations.append(('remove', rrset))
+
+ def get_class(self):
+ """
+ This one is part of pretending to be a zone updater. It returns
+ the IN class.
+ """
+ return self.__rrclass
+
+ def get_updater(self, zone_name, replace):
+ """
+ This one pretends this is the data source client and serves
+ getting an updater.
+
+ If zone_name is 'none.example.org.', it returns None, otherwise
+ it returns self.
+ """
+ # The diff should not delete the old data.
+ self.assertFalse(replace)
+ self.__updater_requested = True
+ # Pretend this zone doesn't exist
+ if zone_name == Name('none.example.org.'):
+ return None
+ else:
+ return self
+
+ def test_create(self):
+ """
+        This tests the case when the diff is successfully created. It just
+ tries it does not throw and gets the updater.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.assertTrue(self.__updater_requested)
+ self.assertEqual([], diff.get_buffer())
+
+ def test_create_nonexist(self):
+ """
+ Try to create a diff on a zone that doesn't exist. This should
+ raise a correct exception.
+ """
+ self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
+ self.assertTrue(self.__updater_requested)
+
+ def __data_common(self, diff, method, operation):
+ """
+ Common part of test for test_add and test_remove.
+ """
+ # Try putting there the bad data first
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertRaises(ValueError, method, self.__rrset_multi)
+ # They were not added
+ self.assertEqual([], diff.get_buffer())
+ # Put some proper data into the diff
+ method(self.__rrset1)
+ method(self.__rrset2)
+ dlist = [(operation, self.__rrset1), (operation, self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Check the data are not destroyed by raising an exception because of
+ # bad data
+ self.assertRaises(ValueError, method, self.__rrset_empty)
+ self.assertEqual(dlist, diff.get_buffer())
+
+ def test_add(self):
+ """
+ Try to add few items into the diff and see they are stored in there.
+
+        Also try passing an rrset that has different amount of RRs than 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.add_data, 'add')
+
+ def test_remove(self):
+ """
+ Try scheduling removal of few items into the diff and see they are
+ stored in there.
+
+ Also try passing an rrset that has different amount of RRs than 1.
+ """
+ diff = Diff(self, Name('example.org.'))
+ self.__data_common(diff, diff.remove_data, 'remove')
+
+ def test_apply(self):
+ """
+ Schedule few additions and check the apply works by passing the
+ data into the updater.
+ """
+ # Prepare the diff
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.remove_data(self.__rrset2)
+ dlist = [('add', self.__rrset1), ('remove', self.__rrset2)]
+ self.assertEqual(dlist, diff.get_buffer())
+ # Do the apply, hook the compact method
+ diff.compact = self.__mock_compact
+ diff.apply()
+ # It should call the compact
+ self.assertTrue(self.__compact_called)
+ # And pass the data. Our local history of what happened is the same
+ # format, so we can check the same way
+ self.assertEqual(dlist, self.__data_operations)
+ # And the buffer in diff should become empty, as everything
+ # got inside.
+ self.assertEqual([], diff.get_buffer())
+
+ def test_commit(self):
+ """
+ If we call a commit, it should first apply whatever changes are
+ left (we hook into that instead of checking the effect) and then
+ the commit on the updater should have been called.
+
+ Then we check it raises value error for whatever operation we try.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ orig_apply = diff.apply
+ diff.apply = self.__mock_apply
+ diff.commit()
+ self.assertTrue(self.__apply_called)
+ self.assertTrue(self.__commit_called)
+ # The data should be handled by apply which we replaced.
+ self.assertEqual([], self.__data_operations)
+ # Now check all range of other methods raise ValueError
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.remove_data, self.__rrset1)
+ diff.apply = orig_apply
+ self.assertRaises(ValueError, diff.apply)
+ # This one does not state it should raise, so check it doesn't
+ # But it is NOP in this situation anyway
+ diff.compact()
+
+ def test_autoapply(self):
+ """
+ Test the apply is called all by itself after 100 tasks are added.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # A method to check the apply is called _after_ the 100th element
+ # is added. We don't use it anywhere else, so we define it locally
+ # as lambda function
+ def check():
+ self.assertEqual(100, len(diff.get_buffer()))
+ self.__mock_apply()
+ orig_apply = diff.apply
+ diff.apply = check
+ # If we put 99, nothing happens yet
+ for i in range(0, 99):
+ diff.add_data(self.__rrset1)
+ expected = [('add', self.__rrset1)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ # Now we push the 100th and it should call the apply method
+ # This will _not_ flush the data yet, as we replaced the method.
+ # It, however, would in the real life.
+ diff.add_data(self.__rrset1)
+ # Now the apply method (which is replaced by our check) should
+ # have been called. If it wasn't, this is false. If it was, but
+ # still with 99 elements, the check would complain
+ self.assertTrue(self.__apply_called)
+ # Reset the buffer by calling the original apply.
+ orig_apply()
+ self.assertEqual([], diff.get_buffer())
+ # Similar with remove
+ self.__apply_called = False
+ for i in range(0, 99):
+ diff.remove_data(self.__rrset2)
+ expected = [('remove', self.__rrset2)] * 99
+ self.assertEqual(expected, diff.get_buffer())
+ self.assertFalse(self.__apply_called)
+ diff.remove_data(self.__rrset2)
+ self.assertTrue(self.__apply_called)
+
+ def test_compact(self):
+ """
+        Test the compaction works as expected, e.g. it compacts only consecutive
+ changes of the same operation and on the same domain/type.
+
+ The test case checks that it does merge them, but also puts some
+ different operations "in the middle", changes the type and name and
+ places the same kind of change further away of each other to see they
+ are not merged in that case.
+ """
+ diff = Diff(self, Name('example.org.'))
+ # Check we can do a compact on empty data, it shouldn't break
+ diff.compact()
+ self.assertEqual([], diff.get_buffer())
+ # This data is the way it should look like after the compact
+ # ('operation', 'domain.prefix', 'type', ['rdata', 'rdata'])
+ # The notes say why the each of consecutive can't be merged
+ data = [
+ ('add', 'a', 'A', ['192.0.2.1', '192.0.2.2']),
+ # Different type.
+ ('add', 'a', 'AAAA', ['2001:db8::1', '2001:db8::2']),
+ # Different operation
+ ('remove', 'a', 'AAAA', ['2001:db8::3']),
+ # Different domain
+ ('remove', 'b', 'AAAA', ['2001:db8::4']),
+ # This does not get merged with the first, even if logically
+ # possible. We just don't do this.
+ ('add', 'a', 'A', ['192.0.2.3'])
+ ]
+ # Now, fill the data into the diff, in a "flat" way, one by one
+ for (op, nprefix, rrtype, rdata) in data:
+ name = Name(nprefix + '.example.org.')
+ rrtype_obj = RRType(rrtype)
+ for rdatum in rdata:
+ rrset = RRset(name, self.__rrclass, rrtype_obj, self.__ttl)
+ rrset.add_rdata(Rdata(rrtype_obj, self.__rrclass, rdatum))
+ if op == 'add':
+ diff.add_data(rrset)
+ else:
+ diff.remove_data(rrset)
+ # Compact it
+ diff.compact()
+ # Now check they got compacted. They should be in the same order as
+ # pushed inside. So it should be the same as data modulo being in
+ # the rrsets and isc.dns objects.
+ def check():
+ buf = diff.get_buffer()
+ self.assertEqual(len(data), len(buf))
+ for (expected, received) in zip(data, buf):
+ (eop, ename, etype, edata) = expected
+ (rop, rrrset) = received
+ self.assertEqual(eop, rop)
+ ename_obj = Name(ename + '.example.org.')
+ self.assertEqual(ename_obj, rrrset.get_name())
+ # We check on names to make sure they are printed nicely
+ self.assertEqual(etype, str(rrrset.get_type()))
+ rdata = rrrset.get_rdata()
+ self.assertEqual(len(edata), len(rdata))
+ # It should also preserve the order
+ for (edatum, rdatum) in zip(edata, rdata):
+ self.assertEqual(edatum, str(rdatum))
+ check()
+ # Try another compact does nothing, but survives
+ diff.compact()
+ check()
+
+ def test_wrong_class(self):
+ """
+ Test a wrong class of rrset is rejected.
+ """
+ diff = Diff(self, Name('example.org.'))
+ rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ self.__ttl)
+ rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ self.assertRaises(ValueError, diff.add_data, rrset)
+ self.assertRaises(ValueError, diff.remove_data, rrset)
+
+ def __do_raise_test(self):
+ """
+ Do a raise test. Expects that one of the operations is exchanged for
+ broken version.
+ """
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ diff.remove_data(self.__rrset2)
+ self.assertRaises(TestError, diff.commit)
+ self.assertTrue(self.__broken_called)
+ self.assertRaises(ValueError, diff.add_data, self.__rrset1)
+ self.assertRaises(ValueError, diff.remove_data, self.__rrset2)
+ self.assertRaises(ValueError, diff.commit)
+ self.assertRaises(ValueError, diff.apply)
+
+ def test_raise_add(self):
+ """
+ Test the exception from add_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.add_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_remove(self):
+ """
+ Test the exception from remove_rrset is propagated and the diff can't be
+ used afterwards.
+ """
+ self.remove_rrset = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_raise_commit(self):
+ """
+ Test the exception from updater's commit gets propagated and it can't be
+ used afterwards.
+ """
+ self.commit = self.__broken_operation
+ self.__do_raise_test()
+
+ def test_ttl(self):
+ """
+ Test the TTL handling. A warn function should have been called if they
+ differ, but that's all, it should not crash or raise.
+ """
+ orig_logger = isc.xfrin.diff.logger
+ try:
+ isc.xfrin.diff.logger = self
+ diff = Diff(self, Name('example.org.'))
+ diff.add_data(self.__rrset1)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(120))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.2'))
+ diff.add_data(rrset2)
+ rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+ self.__type, RRTTL(6000))
+ rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.3'))
+ diff.add_data(rrset2)
+ # They should get compacted together and complain.
+ diff.compact()
+ self.assertEqual(1, len(diff.get_buffer()))
+ # The TTL stays on the first value, no matter if smaller or bigger
+ # ones come later.
+ self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
+ self.assertTrue(self.__warn_called)
+ finally:
+ isc.xfrin.diff.logger = orig_logger
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index 97c4d90..f702d9b 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -123,11 +123,11 @@ called because a nameserver has been found, and that a query is being sent
to the specified nameserver.
% RESLIB_TEST_SERVER setting test server to %1(%2)
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed. As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
+This is a warning message only generated in unit tests. It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed. If seen during normal operation,
+please submit a bug report.
% RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2
This is a debug message and should only be seen in unit tests. A query for
@@ -135,8 +135,8 @@ the specified <name, class, type> tuple is being sent to a test nameserver
whose address is given in the message.
% RESLIB_TIMEOUT query <%1> to %2 timed out
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
% RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)
A debug message indicating that the specified query has timed out and that
diff --git a/src/lib/resolve/tests/Makefile.am b/src/lib/resolve/tests/Makefile.am
index ee311a6..cf05d9b 100644
--- a/src/lib/resolve/tests/Makefile.am
+++ b/src/lib/resolve/tests/Makefile.am
@@ -31,6 +31,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/server_common/Makefile.am b/src/lib/server_common/Makefile.am
index d576104..c2779b4 100644
--- a/src/lib/server_common/Makefile.am
+++ b/src/lib/server_common/Makefile.am
@@ -20,6 +20,9 @@ lib_LTLIBRARIES = libserver_common.la
libserver_common_la_SOURCES = client.h client.cc
libserver_common_la_SOURCES += keyring.h keyring.cc
libserver_common_la_SOURCES += portconfig.h portconfig.cc
+libserver_common_la_SOURCES += logger.h logger.cc
+nodist_libserver_common_la_SOURCES = server_common_messages.h
+nodist_libserver_common_la_SOURCES += server_common_messages.cc
libserver_common_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
@@ -27,5 +30,10 @@ libserver_common_la_LIBADD += $(top_builddir)/src/lib/config/libcfgclient.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/acl/libacl.la
libserver_common_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
+BUILT_SOURCES = server_common_messages.h server_common_messages.cc
+server_common_messages.h server_common_messages.cc: server_common_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/server_common/server_common_messages.mes
-CLEANFILES = *.gcno *.gcda
+EXTRA_DIST = server_common_messages.mes
+
+CLEANFILES = *.gcno *.gcda server_common_messages.h server_common_messages.cc
diff --git a/src/lib/server_common/client.cc b/src/lib/server_common/client.cc
index 31dee88..e6383d6 100644
--- a/src/lib/server_common/client.cc
+++ b/src/lib/server_common/client.cc
@@ -66,10 +66,3 @@ std::ostream&
isc::server_common::operator<<(std::ostream& os, const Client& client) {
return (os << client.toText());
}
-
-template <>
-bool
-IPCheck<Client>::matches(const Client& client) const {
- const IPAddress& request_src(client.getRequestSourceIPAddress());
- return (compare(request_src.getData(), request_src.getFamily()));
-}
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
index 148e069..1c5928a 100644
--- a/src/lib/server_common/client.h
+++ b/src/lib/server_common/client.h
@@ -145,17 +145,6 @@ private:
/// parameter \c os after the insertion operation.
std::ostream& operator<<(std::ostream& os, const Client& client);
}
-
-namespace acl {
-/// The specialization of \c IPCheck for access control with \c Client.
-///
-/// It returns \c true if the source IP address of the client's request
-/// matches the expression encapsulated in the \c IPCheck, and returns
-/// \c false if not.
-template <>
-bool IPCheck<server_common::Client>::matches(
- const server_common::Client& client) const;
-}
}
#endif // __CLIENT_H
diff --git a/src/lib/server_common/keyring.cc b/src/lib/server_common/keyring.cc
index b60e796..501dfd9 100644
--- a/src/lib/server_common/keyring.cc
+++ b/src/lib/server_common/keyring.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <server_common/keyring.h>
+#include <server_common/logger.h>
using namespace isc::dns;
using namespace isc::data;
@@ -31,6 +32,7 @@ updateKeyring(const std::string&, ConstElementPtr data,
const isc::config::ConfigData&) {
ConstElementPtr list(data->get("keys"));
KeyringPtr load(new TSIGKeyRing);
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_UPDATE);
// Note that 'data' only contains explicitly configured config parameters.
// So if we use the default list is NULL, rather than an empty list, and
@@ -50,6 +52,7 @@ initKeyring(config::ModuleCCSession& session) {
// We are already initialized
return;
}
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_INIT);
session.addRemoteConfig("tsig_keys", updateKeyring, false);
}
@@ -59,6 +62,7 @@ deinitKeyring(config::ModuleCCSession& session) {
// Not initialized, ignore it
return;
}
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_KEYS_DEINIT);
keyring.reset();
session.removeRemoteConfig("tsig_keys");
}
diff --git a/src/lib/server_common/logger.cc b/src/lib/server_common/logger.cc
new file mode 100644
index 0000000..0b9ab6e
--- /dev/null
+++ b/src/lib/server_common/logger.cc
@@ -0,0 +1,23 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <server_common/logger.h>
+
+namespace isc {
+namespace server_common {
+
+isc::log::Logger logger("server_common");
+
+}
+}
diff --git a/src/lib/server_common/logger.h b/src/lib/server_common/logger.h
new file mode 100644
index 0000000..cfca1f3
--- /dev/null
+++ b/src/lib/server_common/logger.h
@@ -0,0 +1,44 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERVER_COMMON_LOGGER_H
+#define __SERVER_COMMON_LOGGER_H
+
+#include <log/macros.h>
+#include <server_common/server_common_messages.h>
+
+/// \file logger.h
+/// \brief Server Common library global logger
+///
+/// This holds the logger for the server common library. It is a private header
+/// and should not be included in any publicly used header, only in local
+/// cc files.
+
+namespace isc {
+namespace server_common {
+
+/// \brief The logger for this library
+extern isc::log::Logger logger;
+
+enum {
+ /// \brief Trace basic operations
+ DBG_TRACE_BASIC = 10,
+ /// \brief Print also values used
+ DBG_TRACE_VALUES = 40
+};
+
+}
+}
+
+#endif
diff --git a/src/lib/server_common/portconfig.cc b/src/lib/server_common/portconfig.cc
index 7b2b3dd..379a0a1 100644
--- a/src/lib/server_common/portconfig.cc
+++ b/src/lib/server_common/portconfig.cc
@@ -13,10 +13,10 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <server_common/portconfig.h>
+#include <server_common/logger.h>
#include <asiolink/io_address.h>
#include <asiodns/dns_service.h>
-#include <log/dummylog.h>
#include <boost/foreach.hpp>
#include <boost/lexical_cast.hpp>
@@ -25,7 +25,6 @@ using namespace std;
using namespace isc::data;
using namespace isc::asiolink;
using namespace isc::asiodns;
-using isc::log::dlog;
namespace isc {
namespace server_common {
@@ -43,6 +42,8 @@ parseAddresses(isc::data::ConstElementPtr addresses,
ConstElementPtr addr(addrPair->get("address"));
ConstElementPtr port(addrPair->get("port"));
if (!addr || ! port) {
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_MISSING).
+ arg(addrPair->str());
isc_throw(BadValue, "Address must contain both the IP"
"address and port");
}
@@ -50,6 +51,8 @@ parseAddresses(isc::data::ConstElementPtr addresses,
IOAddress(addr->stringValue());
if (port->intValue() < 0 ||
port->intValue() > 0xffff) {
+ LOG_ERROR(logger, SRVCOMM_PORT_RANGE).
+ arg(port->intValue()).arg(addrPair->str());
isc_throw(BadValue, "Bad port value (" <<
port->intValue() << ")");
}
@@ -57,11 +60,14 @@ parseAddresses(isc::data::ConstElementPtr addresses,
port->intValue()));
}
catch (const TypeError &e) { // Better error message
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_TYPE).
+ arg(addrPair->str());
isc_throw(TypeError,
"Address must be a string and port an integer");
}
}
} else if (addresses->getType() != Element::null) {
+ LOG_ERROR(logger, SRVCOMM_ADDRESSES_NOT_LIST).arg(elemName);
isc_throw(TypeError, elemName + " config element must be a list");
}
}
@@ -86,10 +92,10 @@ installListenAddresses(const AddressList& newAddresses,
isc::asiodns::DNSService& service)
{
try {
- dlog("Setting listen addresses:");
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, SRVCOMM_SET_LISTEN);
BOOST_FOREACH(const AddressPair& addr, newAddresses) {
- dlog(" " + addr.first + ":" +
- boost::lexical_cast<string>(addr.second));
+ LOG_DEBUG(logger, DBG_TRACE_VALUES, SRVCOMM_ADDRESS_VALUE).
+ arg(addr.first).arg(addr.second);
}
setAddresses(service, newAddresses);
addressStore = newAddresses;
@@ -108,13 +114,12 @@ installListenAddresses(const AddressList& newAddresses,
* user will get error info, command control can be used to set new
* address. So we just catch the exception without propagating outside
*/
- dlog(string("Unable to set new address: ") + e.what(), true);
+ LOG_ERROR(logger, SRVCOMM_ADDRESS_FAIL).arg(e.what());
try {
setAddresses(service, addressStore);
}
catch (const exception& e2) {
- dlog("Unable to recover from error;", true);
- dlog(string("Rollback failed with: ") + e2.what(), true);
+ LOG_FATAL(logger, SRVCOMM_ADDRESS_UNRECOVERABLE).arg(e2.what());
}
//Anyway the new configure has problem, we need to notify configure
//manager the new configure doesn't work
diff --git a/src/lib/server_common/server_common_messages.mes b/src/lib/server_common/server_common_messages.mes
new file mode 100644
index 0000000..5fbbb0b
--- /dev/null
+++ b/src/lib/server_common/server_common_messages.mes
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$NAMESPACE isc::server_common
+
+# \brief Messages for the server_common library
+
+% SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1
+This points to an error in configuration. What was supposed to be a list of
+IP address - port pairs isn't a list at all but something else.
+
+% SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)
+The server failed to bind to one of the address/port pair it should according
+to configuration, for reason listed in the message (usually because that pair
+is already used by other service or missing privileges). The server will try
+to recover and bind the address/port pairs it was listening to before (if any).
+
+% SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+
+% SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+
+% SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%1)
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+
+The condition indicates problems with the server and/or the system on
+which it is running. The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator does so.
+
+% SRVCOMM_ADDRESS_VALUE address to set: %1#%2
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (eg. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has higher debug level.
+
+% SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring
+Debug message indicating that the server is deinitializing the TSIG keyring.
+
+% SRVCOMM_KEYS_INIT initializing TSIG keyring
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+
+% SRVCOMM_KEYS_UPDATE updating TSIG keyring
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+
+% SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+
+% SRVCOMM_SET_LISTEN setting addresses to listen to
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am
index 3c061c2..d7e113a 100644
--- a/src/lib/server_common/tests/Makefile.am
+++ b/src/lib/server_common/tests/Makefile.am
@@ -38,8 +38,10 @@ run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/server_common/tests/client_unittest.cc b/src/lib/server_common/tests/client_unittest.cc
index 34a90a2..287a926 100644
--- a/src/lib/server_common/tests/client_unittest.cc
+++ b/src/lib/server_common/tests/client_unittest.cc
@@ -89,30 +89,6 @@ TEST_F(ClientTest, constructIPv6) {
client6->getRequestSourceIPAddress().getData(), 16));
}
-TEST_F(ClientTest, ACLCheckIPv4) {
- // Exact match
- EXPECT_TRUE(IPCheck<Client>("192.0.2.1").matches(*client4));
- // Exact match (negative)
- EXPECT_FALSE(IPCheck<Client>("192.0.2.53").matches(*client4));
- // Prefix match
- EXPECT_TRUE(IPCheck<Client>("192.0.2.0/24").matches(*client4));
- // Prefix match (negative)
- EXPECT_FALSE(IPCheck<Client>("192.0.1.0/24").matches(*client4));
- // Address family mismatch (the first 4 bytes of the IPv6 address has the
- // same binary representation as the client's IPv4 address, which
- // shouldn't confuse the match logic)
- EXPECT_FALSE(IPCheck<Client>("c000:0201::").matches(*client4));
-}
-
-TEST_F(ClientTest, ACLCheckIPv6) {
- // The following are a set of tests of the same concept as ACLCheckIPv4
- EXPECT_TRUE(IPCheck<Client>("2001:db8::1").matches(*client6));
- EXPECT_FALSE(IPCheck<Client>("2001:db8::53").matches(*client6));
- EXPECT_TRUE(IPCheck<Client>("2001:db8::/64").matches(*client6));
- EXPECT_FALSE(IPCheck<Client>("2001:db8:1::/64").matches(*client6));
- EXPECT_FALSE(IPCheck<Client>("32.1.13.184").matches(*client6));
-}
-
TEST_F(ClientTest, toText) {
EXPECT_EQ("192.0.2.1#53214", client4->toText());
EXPECT_EQ("2001:db8::1#53216", client6->toText());
diff --git a/src/lib/server_common/tests/keyring_test.cc b/src/lib/server_common/tests/keyring_test.cc
index d79b541..dab43df 100644
--- a/src/lib/server_common/tests/keyring_test.cc
+++ b/src/lib/server_common/tests/keyring_test.cc
@@ -38,7 +38,8 @@ public:
specfile(std::string(TEST_DATA_PATH) + "/spec.spec")
{
session.getMessages()->add(createAnswer());
- mccs.reset(new ModuleCCSession(specfile, session, NULL, NULL, false));
+ mccs.reset(new ModuleCCSession(specfile, session, NULL, NULL,
+ false, false));
}
isc::cc::FakeSession session;
std::auto_ptr<ModuleCCSession> mccs;
diff --git a/src/lib/server_common/tests/run_unittests.cc b/src/lib/server_common/tests/run_unittests.cc
index b982ef3..860cb77 100644
--- a/src/lib/server_common/tests/run_unittests.cc
+++ b/src/lib/server_common/tests/run_unittests.cc
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
#include <util/unittests/run_all.h>
+#include <log/logger_support.h>
#include <dns/tests/unittest_util.h>
@@ -23,5 +24,7 @@ int
main(int argc, char* argv[]) {
::testing::InitGoogleTest(&argc, argv);
+ isc::log::initLogger();
+
return (isc::util::unittests::run_all());
}
diff --git a/src/lib/testutils/testdata/Makefile.am b/src/lib/testutils/testdata/Makefile.am
index 93b9eb9..918d5c5 100644
--- a/src/lib/testutils/testdata/Makefile.am
+++ b/src/lib/testutils/testdata/Makefile.am
@@ -32,4 +32,4 @@ EXTRA_DIST += test2.zone.in
EXTRA_DIST += test2-new.zone.in
.spec.wire:
- $(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+ $(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am
index 3db9ac4..0b78b29 100644
--- a/src/lib/util/Makefile.am
+++ b/src/lib/util/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . io unittests tests pyunittests
+SUBDIRS = . io unittests tests pyunittests python
AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
diff --git a/src/lib/util/filename.cc b/src/lib/util/filename.cc
index 1f2e5db..d7da9c8 100644
--- a/src/lib/util/filename.cc
+++ b/src/lib/util/filename.cc
@@ -132,6 +132,24 @@ Filename::useAsDefault(const string& name) const {
return (retstring);
}
+void
+Filename::setDirectory(const std::string& new_directory) {
+ std::string directory(new_directory);
+
+ if (directory.length() > 0) {
+ // append '/' if necessary
+ size_t sep = directory.rfind('/');
+ if (sep == std::string::npos || sep < directory.size() - 1) {
+ directory += "/";
+ }
+ }
+ // and regenerate the full name
+ std::string full_name = directory + name_ + extension_;
+
+ directory_.swap(directory);
+ full_name_.swap(full_name);
+}
+
} // namespace log
} // namespace isc
diff --git a/src/lib/util/filename.h b/src/lib/util/filename.h
index 984ecb0..f625938 100644
--- a/src/lib/util/filename.h
+++ b/src/lib/util/filename.h
@@ -86,6 +86,13 @@ public:
return (directory_);
}
+ /// \brief Set directory for the file
+ ///
+ /// \param new_directory The directory to set. If this is an empty
+ /// string, the directory this filename object currently
+ /// has will be removed.
+ void setDirectory(const std::string& new_directory);
+
/// \return Name of Given File Name
std::string name() const {
return (name_);
@@ -96,6 +103,11 @@ public:
return (extension_);
}
+ /// \return Name + extension of Given File Name
+ std::string nameAndExtension() const {
+ return (name_ + extension_);
+ }
+
/// \brief Expand Name with Default
///
/// A default file specified is supplied and used to fill in any missing
diff --git a/src/lib/util/python/Makefile.am b/src/lib/util/python/Makefile.am
new file mode 100644
index 0000000..81d528c
--- /dev/null
+++ b/src/lib/util/python/Makefile.am
@@ -0,0 +1 @@
+noinst_SCRIPTS = gen_wiredata.py mkpywrapper.py
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
new file mode 100755
index 0000000..8bd2b3c
--- /dev/null
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -0,0 +1,1232 @@
+#!@PYTHON@
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Generator of various types of DNS data in the hex format.
+
+This script reads a human readable specification file (called "spec
+file" hereafter) that defines some type of DNS data (an RDATA, an RR,
+or a complete message) and dumps the defined data to a separate file
+as a "wire format" sequence parsable by the
+UnitTestUtil::readWireData() function (currently defined as part of
+libdns++ tests). Many DNS related tests involve wire format test
+data, so it will be convenient if we can define the data in a more
+intuitive way than writing the entire hex sequence by hand.
+
+Here is a simple example. Consider the following spec file:
+
+ [custom]
+ sections: a
+ [a]
+ as_rr: True
+
+When the script reads this file, it detects the file specifies a single
+component (called "section" here) that consists of a single A RDATA,
+which must be dumped as an RR (not only the part of RDATA). It then
+dumps the following content:
+
+ # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=4)
+ 076578616d706c6503636f6d00 0001 0001 00015180 0004
+ # Address=192.0.2.1
+ c0000201
+
+As can be seen, the script automatically completes all variable
+parameters of RRs: owner name, class, TTL, RDATA length and data. For
+testing purposes many of these will be the same common one (like
+"example.com" or 192.0.2.1), so it would be convenient if we only have
+to specify non default parameters. To change the RDATA (i.e., the
+IPv4 address), we should add the following line at the end of the spec
+file:
+
+ address: 192.0.2.2
+
+Then the last two lines of the output file will be as follows:
+
+ # Address=192.0.2.2
+ c0000202
+
+In some cases we would rather specify malformed data for tests. This
+script has the ability to specify broken parameters for many types of
+data. For example, we can generate data that would look like an A RR
+but the RDLEN is 3 by adding the following line to the spec file:
+
+ rdlen: 3
+
+Then the first two lines of the output file will be as follows:
+
+ # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=3)
+ 076578616d706c6503636f6d00 0001 0001 00015180 0003
+
+** USAGE **
+
+ gen_wiredata.py [-o output_file] spec_file
+
+If the -o option is missing, and if the spec_file has a suffix (such as
+in the form of "data.spec"), the output file name will be the prefix
+part of it (as in "data"); if -o is missing and the spec_file does not
+have a suffix, the script will fail.
+
+** SPEC FILE SYNTAX **
+
+A spec file accepted in this script should be in the form of a
+configuration file that is parsable by the Python's standard
+configparser module. In short, it consists of sections; each section
+is identified in the form of [section_name] followed by "name: value"
+entries. Lines beginning with # or ; will be treated as comments.
+Refer to the configparser module documentation for further details of
+the general syntax.
+
+This script has two major modes: the custom mode and the DNS query
+mode. The former generates an arbitrary combination of DNS message
+header, question section, RDATAs or RRs. It is mainly intended to
+generate a test data for a single type of RDATA or RR, or for
+complicated complete DNS messages. The DNS query mode is actually a
+special case of the custom mode, which is a shortcut to generate a
+simple DNS query message (with or without EDNS).
+
+* Custom mode syntax *
+
+By default this script assumes the DNS query mode. To specify the
+custom mode, there must be a special "custom" section in the spec
+file, which should contain 'sections' entry. This value of this
+entryis colon-separated string fields, each of which is either
+"header", "question", "edns", "name", or a string specifying an RR
+type. For RR types the string is lower-cased string mnemonic that
+identifies the type: 'a' for type A, 'ns' for type NS, and so on
+(note: in the current implementation it's case sensitive, and must be
+lower cased).
+
+Each of these fields is interpreted as a section name of the spec
+(configuration), and in that section parameters specific to the
+semantics of the field can be configured.
+
+A "header" section specifies the content of a DNS message header.
+See the documentation of the DNSHeader class of this module for
+configurable parameters.
+
+A "question" section specifies the content of a single question that
+is normally to be placed in the Question section of a DNS message.
+See the documentation of the DNSQuestion class of this module for
+configurable parameters.
+
+An "edns" section specifies the content of an EDNS OPT RR. See the
+documentation of the EDNS class of this module for configurable
+parameters.
+
+A "name" section specifies a domain name with or without compression.
+This is specifically intended to be used for testing name related
+functionalities and would rarely be used with other sections. See the
+documentation of the Name class of this module for configurable
+parameters.
+
+In a specific section for an RR or RDATA, possible entries depend on
+the type. But there are some common configurable entries. See the
+description of the RR class. The most important one would be "as_rr".
+It controls whether the entry should be treated as an RR (with name,
+type, class and TTL) or only as an RDATA. By default as_rr is
+"False", so if an entry is to be interpreted as an RR, an as_rr entry
+must be explicitly specified with a value of "True".
+
+Another common entry is "rdlen". It specifies the RDLEN field value
+of the RR (note: this is included when the entry is interpreted as
+RDATA, too). By default this value is automatically determined by the
+RR type and (it has a variable length) from other fields of RDATA, but
+as shown in the above example, it can be explicitly set, possibly to a
+bogus value for testing against invalid data.
+
+For type specific entries (and their defaults when provided), see the
+documentation of the corresponding Python class defined in this
+module. In general, there should be a class named the same mnemonic
+of the corresponding RR type for each supported type, and they are a
+subclass of the RR class. For example, the "NS" class is defined for
+RR type NS.
+
+Look again at the A RR example shown at the beginning of this
+description. There's a "custom" section, which consists of a
+"sections" entry whose value is a single "a", which means the data to
+be generated is an A RR or RDATA. There's a corresponding "a"
+section, which only specifies that it should be interpreted as an RR
+(all field values of the RR are derived from the default).
+
+If you want to generate a data sequence for two or more RRs or
+RDATAs, you can specify them in the form of colon-separated fields for
+the "sections" entry. For example, to generate a sequence of A and NS
+RRs in that order, the "custom" section would be something like this:
+
+ [custom]
+ sections: a:ns
+
+and there must be an "ns" section in addition to "a".
+
+If a sequence of two or more RRs/RDATAs of the same RR type should be
+generated, these should be uniquely indexed with the "/" separator.
+For example, to generate two A RRs, the "custom" section would be as
+follows:
+
+ [custom]
+ sections: a/1:a/2
+
+and there must be "a/1" and "a/2" sections.
+
+Another practical example that would be used for many tests is to
+generate data for a complete DNS response message. The spec file of
+such an example configuration would look as follows:
+
+ [custom]
+ sections: header:question:a
+ [header]
+ qr: 1
+ ancount: 1
+ [question]
+ [a]
+ as_rr: True
+
+With this configuration, this script will generate test data for a DNS
+response to a query for example.com/IN/A containing one corresponding
+A RR in the answer section.
+
+* DNS query mode syntax *
+
+If the spec file does not contain a "custom" section (that has a
+"sections" entry), this script assumes the DNS query mode. This mode
+is actually a special case of custom mode; it implicitly assumes the
+"sections" entry whose value is "header:question:edns".
+
+In this mode it is expected that the spec file also contains at least
+a "header" and "question" sections, and optionally an "edns" section.
+But the script does not warn or fail even if the expected sections are
+missing.
+
+* Entry value types *
+
+As described above, a section of the spec file accepts entries
+specific to the semantics of the section. They generally correspond
+to DNS message or RR fields.
+
+Many of them are expected to be integral values, for which either decimal or
+hexadecimal representation is accepted, for example:
+
+ rr_ttl: 3600
+ tag: 0x1234
+
+Some others are expected to be strings. A string value does not have
+to be quoted:
+
+ address: 192.0.2.2
+
+but can also be quoted with single quotes:
+
+ address: '192.0.2.2'
+
+Note 1: a string that can be interpreted as an integer must be quoted.
+For example, if you want to set a "string" entry to "3600", it should
+be:
+
+ string: '3600'
+
+instead of
+
+ string: 3600
+
+Note 2: a string enclosed with double quotes is not accepted:
+
+ # This doesn't work:
+ address: "192.0.2.2"
+
+In general, string values are converted to hexadecimal sequences
+according to the semantics of the entry. For instance, a textual IPv4
+address in the above example will be converted to a hexadecimal
+sequence corresponding to a 4-byte integer. So, in many cases, the
+acceptable syntax for a particular string entry value should be
+obvious from the context. There are still some exceptional cases
+especially for complicated RR field values, for which the
+corresponding class documentation should be referenced.
+
+One special string syntax that would be worth noting is domain names,
+which would naturally be used in many kinds of entries. The simplest
+form of acceptable syntax is a textual representation of domain names
+such as "example.com" (note: names are always assumed to be
+"absolute", so the trailing dot can be omitted). But a domain name in
+the wire format can also contain a compression pointer. This script
+provides a simple support for name compression with a special notation
+of "ptr=nn" where nn is the numeric pointer value (decimal). For example,
+if the NSDNAME field of an NS RDATA is specified as follows:
+
+ nsname: ns.ptr=12
+
+this script will generate the following output:
+
+ # NS name=ns.ptr=12
+ 026e73c00c
+
+** EXTEND THE SCRIPT **
+
+This script is expected to be extended as we add more support for
+various types of RR. It is encouraged to add support for a new type
+of RR to this script as we see the need for testing that type. Here
+is a simple instruction of how to do that.
+
+Assume you are adding support for "FOO" RR. Also assume that the FOO
+RDATA contains a single field named "value".
+
+What you are expected to do is as follows:
+
+- Define a new class named "FOO" inherited from the RR class. Also
+ define a class variable named "value" for the FOO RDATA field (the
+ variable name can be different from the field name, but it's
+  convenient if it can be easily identified) with an appropriate
+ default value (if possible):
+
+ class FOO(RR):
+ value = 10
+
+ The name of the variable will be (automatically) used as the
+ corresponding entry name in the spec file. So, a spec file that
+ sets this field to 20 would look like this:
+
+ [foo]
+ value: 20
+
+- Define the "dump()" method for class FOO. It must call
+ self.dump_header() (which is derived from class RR) at the
+ beginning. It then prints the RDATA field values in an appropriate
+ way. Assuming the value is a 16-bit integer field, a complete
+ dump() method would look like this:
+
+ def dump(self, f):
+ if self.rdlen is None:
+ self.rdlen = 2
+ self.dump_header(f, self.rdlen)
+ f.write('# Value=%d\\n' % (self.value))
+ f.write('%04x\\n' % (self.value))
+
+ The first f.write() call is not mandatory, but is encouraged to
+ be provided so that the generated files will be more human readable.
+ Depending on the complexity of the RDATA fields, the dump()
+ implementation would be more complicated. In particular, if the
+ RDATA length is variable and the RDLEN field value is not specified
+ in the spec file, the dump() method is normally expected to
+ calculate the correct length and pass it to dump_header(). See the
+ implementation of various derived classes of class RR for actual
+ examples.
+"""
+
+import configparser, re, time, socket, sys
+from datetime import datetime
+from optparse import OptionParser
+
# Regular expressions used by parse_value() to classify spec-file entry
# values: hexadecimal integers, decimal integers, single-quoted strings.
re_hex = re.compile(r'^0x[0-9a-fA-F]+')
re_decimal = re.compile(r'^\d+$')
re_string = re.compile(r"\'(.*)\'$")

# Timestamp format for DNSSEC times (YYYYMMDDhhmmss).
dnssec_timefmt = '%Y%m%d%H%M%S'

# Forward tables map lower-case textual mnemonics to numeric codes; the
# corresponding rdict_* tables are the reverse mappings (numeric code ->
# upper-case mnemonic) used when writing human-readable dump comments.
dict_qr = { 'query' : 0, 'response' : 1 }
dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
                'update' : 5 }
rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
               'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
               'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
                'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
                'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
                'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
                'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
                'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
                'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
                'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
                'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
                'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
                'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
                'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
                'maila' : 254, 'any' : 255 }
rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
                      dict_rrclass.keys()])
dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
                   'rsasha1' : 5 }
dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
                        dict_algorithm.keys()])
rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
                              dict_nsec3_algorithm.keys()])

# Per-section translation tables passed to get_config()/parse_value() so
# mnemonic entry values (e.g. rcode: nxdomain) become numeric codes.
header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
                   'rcode' : dict_rcode }
question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
+
def parse_value(value, xtable = {}):
    '''Convert a textual spec-file entry value to its canonical form.

    Hexadecimal ("0x...") and decimal strings become integers; a
    single-quoted string becomes its unquoted content; otherwise the
    value is looked up case-insensitively in xtable, and is returned
    unchanged when no translation exists.
    '''
    if re_hex.search(value):
        return int(value, 16)
    if re_decimal.search(value):
        return int(value)
    quoted = re_string.match(value)
    if quoted is not None:
        return quoted.group(1)
    key = value.lower()
    return xtable[key] if key in xtable else value
+
def code_totext(code, dict):
    '''Return "MNEMONIC(code)" when code is a key of dict, else str(code).'''
    if code in dict:
        return '%s(%s)' % (dict[code], code)
    return str(code)
+
def encode_name(name, absolute=True):
    '''Render a textual domain name as a hexadecimal wire-format string.

    A label of the special form "ptr=nn" is emitted as a compression
    pointer to offset nn and terminates the encoding.  When absolute is
    True the terminating null label is always emitted; an empty label
    always ends the loop, so a dot-terminated input is handled too.
    '''
    parts = []
    # Guarantee a trailing dot; the resulting empty label ends the loop.
    for label in (name + '.').split('.'):
        if len(label) > 4 and label[0:4] == 'ptr=':
            # compression pointer meta-syntax: top two bits set + offset
            parts.append('%04x' % (0xc000 | int(label[4:])))
            break
        if absolute or len(label) > 0:
            parts.append('%02x' % len(label))
            parts.extend('%02x' % ord(ch) for ch in label)
        if len(label) == 0:
            break
    return ''.join(parts)
+
def encode_string(name, len=None):
    '''Hex-encode a character string, or a fixed-width integer.

    If name is an int and len (a byte count) is given, the integer is
    rendered zero-padded to len bytes; otherwise every character of the
    string is rendered as its two-digit hexadecimal character code.
    '''
    if type(name) is int and len is not None:
        return '%0.*x' % (len * 2, name)
    return ''.join('%02x' % ord(ch) for ch in name)
+
def count_namelabels(name):
    '''Count the labels of a textual domain name.

    The root name "." counts as 0 labels; a single trailing dot is
    ignored, so 'example.com' and 'example.com.' both count as 2.
    '''
    if name == '.':             # special case for the root name
        return 0
    # Raw string for the pattern: in the original, plain '^(.*)\.$' made
    # "\." an invalid string escape (a SyntaxWarning in modern Python 3).
    m = re.match(r'^(.*)\.$', name)
    if m:
        name = m.group(1)
    return len(name.split('.'))
+
def get_config(config, section, configobj, xtables = {}):
    '''Copy every entry of the named spec-file section onto configobj.

    Each value is converted via parse_value(), using the field-specific
    translation table from xtables when one is registered.  Returns
    False when the section does not exist, True otherwise.
    '''
    try:
        for field in config.options(section):
            raw = config.get(section, field)
            table = xtables[field] if field in xtables else {}
            configobj.__dict__[field] = parse_value(raw, table)
    except configparser.NoSectionError:
        return False
    return True
+
def print_header(f, input_file):
    '''Write the banner identifying the spec file that generated the data.'''
    f.write('###\n'
            '### This data file was auto-generated from ' + input_file + '\n'
            '###\n')
+
class Name:
    '''Implements rendering a single domain name in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - name (string): A textual representation of the name, such as
      'example.com'.
    - pointer (int): If specified, a compression pointer with this
      offset is appended to the generated data.
    '''

    name = 'example.com'
    pointer = None              # no compression by default

    def dump(self, f):
        to_encode = self.name
        if self.pointer is not None:
            # append the pointer as a dot-separated pseudo label
            if len(to_encode) > 0 and to_encode[-1] != '.':
                to_encode += '.'
            to_encode += 'ptr=%d' % self.pointer
        comment = '\n# DNS Name: %s' % self.name
        if self.pointer is not None:
            comment += ' + compression pointer: %d' % self.pointer
        f.write(comment + '\n')
        f.write(encode_name(to_encode) + '\n')
+
class DNSHeader:
    '''Implements rendering a DNS Header section in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - id (16-bit int):
    - qr, aa, tc, rd, ra, ad, cd (0 or 1): Standard header bits as
      defined in RFC1035 and RFC4035.  If set to 1, the corresponding
      bit will be set; if set to 0, it will be cleared.
    - mbz (0-3): The reserved field of the 3rd and 4th octets of the
      header.
    - rcode (4-bit int or string): The RCODE field.  If specified as a
      string, it must be the commonly used textual mnemonic of the RCODEs
      (NOERROR, FORMERR, etc, case insensitive).
    - opcode (4-bit int or string): The OPCODE field.  If specified as
      a string, it must be the commonly used textual mnemonic of the
      OPCODEs (QUERY, NOTIFY, etc, case insensitive).
    - qdcount, ancount, nscount, arcount (16-bit int): The QD/AN/NS/AR
      COUNT fields, respectively.
    '''

    id = 0x1035
    (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
    mbz = 0
    rcode = 0                   # noerror
    opcode = 0                  # query
    (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0

    def dump(self, f):
        f.write('\n# Header Section\n')
        f.write('# ID=' + str(self.id))
        f.write(' QR=' + ('Response' if self.qr else 'Query'))
        f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
        f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
        f.write('%s' % (' AA' if self.aa else ''))
        f.write('%s' % (' TC' if self.tc else ''))
        f.write('%s' % (' RD' if self.rd else ''))
        f.write('%s' % (' AD' if self.ad else ''))
        f.write('%s' % (' CD' if self.cd else ''))
        f.write('\n')
        f.write('%04x ' % self.id)
        # Flags/code layout (RFC1035 section 4.1.1): QR is bit 15 and
        # OPCODE occupies bits 11-14, followed by AA(10) TC(9) RD(8)
        # RA(7) reserved(6) AD(5) CD(4) and RCODE in the low 4 bits.
        # BUG FIX: the opcode was shifted by 14, which overlaps the QR
        # bit for any opcode >= 2; the correct shift is 11.
        flag_and_code = 0
        flag_and_code |= (self.qr << 15 | self.opcode << 11 | self.aa << 10 |
                          self.tc << 9 | self.rd << 8 | self.ra << 7 |
                          self.mbz << 6 | self.ad << 5 | self.cd << 4 |
                          self.rcode)
        f.write('%04x\n' % flag_and_code)
        f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
                (self.qdcount, self.ancount, self.nscount, self.arcount))
        f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
                                           self.nscount, self.arcount))
+
class DNSQuestion:
    '''Implements rendering a DNS question in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - name (string): The QNAME.  The string must be interpreted as a
      valid domain name.
    - rrtype (int or string): The question type, given either as the
      16-bit RR type code or as its textual mnemonic.
    - rrclass (int or string): The question class, given either as the
      16-bit RR class code or as its textual mnemonic.
    '''
    name = 'example.com.'
    rrtype = parse_value('A', dict_rrtype)
    rrclass = parse_value('IN', dict_rrclass)

    def dump(self, f):
        comment = '\n# Question Section\n'
        comment += '# QNAME=%s QTYPE=%s QCLASS=%s\n' % (
            self.name,
            code_totext(self.rrtype, rdict_rrtype),
            code_totext(self.rrclass, rdict_rrclass))
        f.write(comment)
        f.write('%s %04x %04x\n' % (encode_name(self.name),
                                    self.rrtype, self.rrclass))
+
class EDNS:
    '''Implements rendering EDNS OPT RR in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - name (string): The owner name of the OPT RR.  The string must be
      interpreted as a valid domain name.
    - udpsize (16-bit int): The UDP payload size (set as the RR class)
    - extrcode (8-bit int): The upper 8 bits of the extended RCODE.
    - version (8-bit int): The EDNS version.
    - do (int): The DNSSEC DO bit.  The bit will be set if this value
      is 1; otherwise the bit will be unset.
    - mbz (15-bit int): The rest of the flags field.
    - rdlen (16-bit int): The RDLEN field.  Note: right now specifying
      a non 0 value (except for making bogus data) doesn't make sense
      because there is no way to configure RDATA.
    '''
    name = '.'
    udpsize = 4096
    extrcode = 0
    version = 0
    do = 0
    mbz = 0
    rdlen = 0

    def dump(self, f):
        f.write('\n# EDNS OPT RR\n')
        f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
                (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
                 self.udpsize, self.extrcode, self.version,
                 1 if self.do else 0))

        # The extended RCODE (high byte) and version (low byte) share a
        # 16-bit field; DO is the top bit of the extended flags with mbz
        # filling the remaining 15 bits.
        code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
        extflags = (self.do << 15) | (self.mbz & ~0x8000)
        f.write('%s %04x %04x %04x %04x\n' %
                (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
                 code_vers, extflags))
        f.write('# RDLEN=%d\n' % self.rdlen)
        f.write('%04x\n' % self.rdlen)
+
class RR:
    '''This is a base class for various types of RR test data.
    For each RR type (A, AAAA, NS, etc), we define a derived class of RR
    to dump type specific RDATA parameters.  This class defines parameters
    common to all types of RDATA, namely the owner name, RR class and TTL.
    The dump() method of derived classes are expected to call dump_header(),
    whose default implementation is provided in this class.  This method
    decides whether to dump the test data as an RR (with name, type, class)
    or only as RDATA (with its length), and dumps the corresponding data
    via the specified file object.

    By convention we assume derived classes are named after the common
    standard mnemonic of the corresponding RR types.  For example, the
    derived class for the RR type SOA should be named "SOA".

    Configurable parameters are as follows:
    - as_rr (bool): Whether or not the data is to be dumped as an RR.
      False by default.
    - rr_name (string): The owner name of the RR.  The string must be
      interpreted as a valid domain name (compression pointer can be
      contained).  Default is 'example.com.'
    - rr_class (string): The RR class of the data.  Only meaningful
      when the data is dumped as an RR.  Default is 'IN'.
    - rr_ttl (int): The TTL value of the RR.  Only meaningful when
      the data is dumped as an RR.  Default is 86400 (1 day).
    - rdlen (int): 16-bit RDATA length.  It can be None (i.e. omitted
      in the spec file), in which case the actual length of the
      generated RDATA is automatically determined and used; if
      negative, the RDLEN field will be omitted from the output data.
      (Note that omitting RDLEN with as_rr being True is mostly
      meaningless, although the script doesn't complain about it).
      Default is None.
    '''

    def __init__(self):
        # Instance-level defaults; entries parsed from the spec file
        # (applied via get_config()) override these attributes by name.
        self.as_rr = False
        # only when as_rr is True, same for class/TTL:
        self.rr_name = 'example.com'
        self.rr_class = 'IN'
        self.rr_ttl = 86400
        self.rdlen = None

    def dump_header(self, f, rdlen):
        # By convention, the derived class's name is the RR type mnemonic,
        # so it can be translated directly to the numeric type code.
        type_txt = self.__class__.__name__
        type_code = parse_value(type_txt, dict_rrtype)
        rdlen_spec = ''
        rdlen_data = ''
        # A negative rdlen leaves both strings empty, omitting the RDLEN
        # field from the output entirely.
        if rdlen >= 0:
            rdlen_spec = ', RDLEN=%d' % rdlen
            rdlen_data = '%04x' % rdlen
        if self.as_rr:
            # Full RR form: owner name, type, class, TTL, then RDLEN.
            rrclass = parse_value(self.rr_class, dict_rrclass)
            f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d%s)\n' %
                    (type_txt, self.rr_name,
                     code_totext(rrclass, rdict_rrclass), self.rr_ttl,
                     rdlen_spec))
            f.write('%s %04x %04x %08x %s\n' %
                    (encode_name(self.rr_name), type_code, rrclass,
                     self.rr_ttl, rdlen_data))
        else:
            # RDATA-only form: just the (optional) RDLEN field.
            f.write('\n# %s RDATA%s\n' % (type_txt, rdlen_spec))
            f.write('%s\n' % rdlen_data)
+
class A(RR):
    '''Implements rendering A RDATA (of class IN) in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - address (string): The address field.  This must be a valid textual
      IPv4 address.
    '''
    RDLEN_DEFAULT = 4           # fixed by default
    address = '192.0.2.1'

    def dump(self, f):
        if self.rdlen is None:
            self.rdlen = self.RDLEN_DEFAULT
        self.dump_header(f, self.rdlen)
        f.write('# Address=%s\n' % (self.address))
        packed = socket.inet_aton(self.address)
        # the 4 address bytes, two hex digits each
        f.write('%s\n' % ''.join(['%02x' % octet for octet in packed]))
+
class AAAA(RR):
    '''Implements rendering AAAA RDATA (of class IN) in the test data
    format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - address (string): The address field.  This must be a valid textual
      IPv6 address.
    '''
    RDLEN_DEFAULT = 16          # fixed by default
    address = '2001:db8::1'

    def dump(self, f):
        if self.rdlen is None:
            self.rdlen = self.RDLEN_DEFAULT
        self.dump_header(f, self.rdlen)
        f.write('# Address=%s\n' % (self.address))
        bin_address = socket.inet_pton(socket.AF_INET6, self.address)
        # Write the 16 address bytes as hex.  An explicit loop replaces
        # the original list comprehension, which was used only for its
        # f.write() side effects (an anti-idiom); output is identical.
        for x in bin_address:
            f.write('%02x' % x)
        f.write('\n')
+
class NS(RR):
    '''Implements rendering NS RDATA in the test data format.

    Configurable parameter is as follows (see the description of the
    same name of attribute for the default value):
    - nsname (string): The NSDNAME field.  The string must be
      interpreted as a valid domain name.
    '''

    nsname = 'ns.example.com'

    def dump(self, f):
        nsname_wire = encode_name(self.nsname)
        if self.rdlen is None:
            # The wire string holds two hex characters per byte.  Use
            # integer division: the original true division ('/') yields
            # a float in Python 3, which breaks the '%04x' formatting of
            # rdlen in dump_header().
            self.rdlen = len(nsname_wire) // 2
        self.dump_header(f, self.rdlen)
        f.write('# NS name=%s\n' % (self.nsname))
        f.write('%s\n' % nsname_wire)
+
class SOA(RR):
    '''Implements rendering SOA RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - mname/rname (string): The MNAME/RNAME fields, respectively.  The
      string must be interpreted as a valid domain name.
    - serial (32-bit int): The SERIAL field
    - refresh (32-bit int): The REFRESH field
    - retry (32-bit int): The RETRY field
    - expire (32-bit int): The EXPIRE field
    - minimum (32-bit int): The MINIMUM field
    '''

    mname = 'ns.example.com'
    rname = 'root.example.com'
    serial = 2010012601
    refresh = 3600
    retry = 300
    expire = 3600000
    minimum = 1200

    def dump(self, f):
        mname_wire = encode_name(self.mname)
        rname_wire = encode_name(self.rname)
        if self.rdlen is None:
            # 5 32-bit fields (20 bytes) plus the two names; each wire
            # string holds two hex characters per byte.  (Dropped the
            # redundant str() around rname_wire, which is already a str.)
            self.rdlen = int(20 + len(mname_wire) / 2 + len(rname_wire) / 2)
        self.dump_header(f, self.rdlen)
        # Label fixed: the original emitted the misspelled "NNAME" for
        # the MNAME field in the generated comment line.
        f.write('# MNAME=%s RNAME=%s\n' % (self.mname, self.rname))
        f.write('%s %s\n' % (mname_wire, rname_wire))
        f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
                (self.serial, self.refresh, self.retry, self.expire,
                 self.minimum))
        f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
                                                self.retry, self.expire,
                                                self.minimum))
+
class TXT(RR):
    '''Implements rendering TXT RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - nstring (int): number of character-strings
    - stringlenN (int, N = 0, ..., nstring-1): the length of the
      N-th character-string.
    - stringN (string, N = 0, ..., nstring-1): the N-th
      character-string.
    - stringlen (int): the default string length.  If nstring >= 1 and
      the corresponding stringlenN isn't specified in the spec file,
      this value will be used.  If this parameter isn't specified
      either, the length of the string will be used.  Note that it
      means this parameter (or any stringlenN) doesn't have to be
      specified unless you want to intentionally build a broken
      character string.
    - string (string): the default string.  If nstring >= 1 and the
      corresponding stringN isn't specified in the spec file, this
      string will be used.
    '''

    nstring = 1
    stringlen = None
    string = 'Test String'

    def dump(self, f):
        stringlen_list = []
        string_list = []
        wirestring_list = []
        for i in range(0, self.nstring):
            # per-string override: a "stringN" entry beats the default
            key_string = 'string' + str(i)
            if key_string in self.__dict__:
                string_list.append(self.__dict__[key_string])
            else:
                string_list.append(self.string)
            wirestring_list.append(encode_string(string_list[-1]))
            key_stringlen = 'stringlen' + str(i)
            if key_stringlen in self.__dict__:
                stringlen_list.append(self.__dict__[key_stringlen])
            else:
                stringlen_list.append(self.stringlen)
            if stringlen_list[-1] is None:
                # default: the actual length of the encoded string
                stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
        if self.rdlen is None:
            # each character-string is prefixed by a one-byte length octet
            self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
        self.dump_header(f, self.rdlen)
        for i in range(0, self.nstring):
            f.write('# String Len=%d, String=\"%s\"\n' %
                    (stringlen_list[i], string_list[i]))
            f.write('%02x%s%s\n' % (stringlen_list[i],
                                    ' ' if len(wirestring_list[i]) > 0 else '',
                                    wirestring_list[i]))
+
class RP(RR):
    '''Implements rendering RP RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - mailbox (string): The mailbox field.
    - text (string): The text field.
    These strings must be interpreted as a valid domain name.
    '''
    mailbox = 'root.example.com'
    text = 'rp-text.example.com'

    def dump(self, f):
        mailbox_wire = encode_name(self.mailbox)
        text_wire = encode_name(self.text)
        if self.rdlen is None:
            # Wire strings hold two hex characters per byte.  Integer
            # division keeps rdlen an int; the original '/' produced a
            # float in Python 3, breaking '%04x' in dump_header().
            self.rdlen = (len(mailbox_wire) + len(text_wire)) // 2
        else:
            self.rdlen = int(self.rdlen)
        self.dump_header(f, self.rdlen)
        f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
        f.write('%s %s\n' % (mailbox_wire, text_wire))
+
class MINFO(RR):
    '''Implements rendering MINFO RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - rmailbox (string): The rmailbox field.
    - emailbox (string): The emailbox field.
    These strings must be interpreted as a valid domain name.
    '''
    rmailbox = 'rmailbox.example.com'
    emailbox = 'emailbox.example.com'

    def dump(self, f):
        rmailbox_wire = encode_name(self.rmailbox)
        emailbox_wire = encode_name(self.emailbox)
        if self.rdlen is None:
            # Wire strings hold two hex characters per byte.  Integer
            # division keeps rdlen an int; the original '/' produced a
            # float in Python 3, breaking '%04x' in dump_header().
            self.rdlen = (len(rmailbox_wire) + len(emailbox_wire)) // 2
        else:
            self.rdlen = int(self.rdlen)
        self.dump_header(f, self.rdlen)
        f.write('# RMAILBOX=%s EMAILBOX=%s\n' % (self.rmailbox, self.emailbox))
        f.write('%s %s\n' % (rmailbox_wire, emailbox_wire))
+
class AFSDB(RR):
    '''Implements rendering AFSDB RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - subtype (16 bit int): The subtype field.
    - server (string): The server field.
    The string must be interpreted as a valid domain name.
    '''
    subtype = 1
    server = 'afsdb.example.com'

    def dump(self, f):
        server_wire = encode_name(self.server)
        if self.rdlen is None:
            # 2 bytes of subtype plus the server name (two hex chars per
            # byte).  Integer division keeps rdlen an int; the original
            # '/' produced a float in Python 3, breaking '%04x' in
            # dump_header().
            self.rdlen = 2 + len(server_wire) // 2
        else:
            self.rdlen = int(self.rdlen)
        self.dump_header(f, self.rdlen)
        f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
        f.write('%04x %s\n' % (self.subtype, server_wire))
+
class NSECBASE(RR):
    '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
    these RRs.  The NSEC and NSEC3 classes will be inherited from this
    class.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - nbitmap (int): The number of type bitmaps.
    The following three define the bitmaps.  If suffixed with "N"
    (0 <= N < nbitmaps), it means the definition for the N-th bitmap.
    If there is no suffix (e.g., just "block"), it means the default
    for any unspecified values.
    - block[N] (8-bit int): The Window Block.
    - maplen[N] (8-bit int): The Bitmap Length.  The default "maplen"
      can also be unspecified (with being set to None), in which case
      the corresponding length will be calculated from the bitmap.
    - bitmap[N] (string): The Bitmap.  This must be the hexadecimal
      representation of the bitmap field.  For example, for a bitmap
      where the 7th and 15th bits (and only these bits) are set, it
      must be '0101'.  Note also that the value must be quoted with
      single quotes because it could also be interpreted as an
      integer.
    '''
    nbitmap = 1                 # number of bitmaps
    block = 0
    maplen = None               # default bitmap length, auto-calculate
    bitmap = '040000000003'     # an arbitrarily chosen bitmap sample

    def dump(self, f):
        # first, construct the bitmap data
        block_list = []
        maplen_list = []
        bitmap_list = []
        for i in range(0, self.nbitmap):
            # per-bitmap override: "bitmapN"/"maplenN"/"blockN" entries
            # beat the corresponding defaults
            key_bitmap = 'bitmap' + str(i)
            if key_bitmap in self.__dict__:
                bitmap_list.append(self.__dict__[key_bitmap])
            else:
                bitmap_list.append(self.bitmap)
            key_maplen = 'maplen' + str(i)
            if key_maplen in self.__dict__:
                maplen_list.append(self.__dict__[key_maplen])
            else:
                maplen_list.append(self.maplen)
            if maplen_list[-1] is None: # calculate it if not specified
                maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
            key_block = 'block' + str(i)
            if key_block in self.__dict__:
                block_list.append(self.__dict__[key_block])
            else:
                block_list.append(self.block)

        # dump RR-type specific part (NSEC or NSEC3); the argument is the
        # total wire length of all (block, length, bitmap) triplets
        self.dump_fixedpart(f, 2 * self.nbitmap + \
                            int(len(''.join(bitmap_list)) / 2))

        # dump the bitmap
        for i in range(0, self.nbitmap):
            f.write('# Bitmap: Block=%d, Length=%d\n' %
                    (block_list[i], maplen_list[i]))
            f.write('%02x %02x %s\n' %
                    (block_list[i], maplen_list[i], bitmap_list[i]))
+
class NSEC(NSECBASE):
    '''Implements rendering NSEC RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - Type bitmap related parameters: see class NSECBASE
    - nextname (string): The Next Domain Name field.  The string must be
      interpreted as a valid domain name.
    '''

    nextname = 'next.example.com'

    def dump_fixedpart(self, f, bitmap_totallen):
        '''Dump the RDATA header and the Next Domain Name field.'''
        name_wire = encode_name(self.nextname)
        namelen = int(len(name_wire) / 2)
        if self.rdlen is None:
            # An auto-calculated rdlen must be based on the real bitmap
            # length, because the configured maplen can be fake.
            self.rdlen = namelen + bitmap_totallen
        self.dump_header(f, self.rdlen)
        f.write('# Next Name=%s (%d bytes)\n' % (self.nextname, namelen))
        f.write('%s\n' % name_wire)
+
class NSEC3(NSECBASE):
    '''Implements rendering NSEC3 RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - Type bitmap related parameters: see class NSECBASE
    - hashalg (8-bit int): The Hash Algorithm field.  Note that
      currently the only defined algorithm is SHA-1, for which a value
      of 1 will be used, and it's the default.  So this implementation
      does not support any string representation right now.
    - optout (bool): The Opt-Out flag of the Flags field.
    - mbz (7-bit int): The rest of the Flags field.  This value will
      be left shifted for 1 bit and then OR-ed with optout to
      construct the complete Flags field.
    - iterations (16-bit int): The Iterations field.
    - saltlen (int): The Salt Length field.
    - salt (string): The Salt field.  It is converted to a sequence of
      ascii codes and its hexadecimal representation will be used.
    - hashlen (int): The Hash Length field.
    - hash (string): The Next Hashed Owner Name field.  This parameter
      is interpreted as "salt".
    '''

    hashalg = 1                 # SHA-1
    optout = False              # opt-out flag
    mbz = 0                     # other flag fields (none defined yet)
    iterations = 1
    saltlen = 5
    # NOTE(review): the defaults below are evaluated once at class
    # definition time, so overriding saltlen/hashlen in a spec file does
    # not resize the default salt/hash strings -- confirm this is intended.
    salt = 's' * saltlen
    hashlen = 20
    hash = 'h' * hashlen

    def dump_fixedpart(self, f, bitmap_totallen):
        if self.rdlen is None:
            # if rdlen needs to be calculated, it must be based on the bitmap
            # length, because the configured maplen can be fake.
            # 4 = hashalg(1) + flags(1) + iterations(2); plus the two
            # one-byte length octets and the salt/hash data themselves.
            self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
                + bitmap_totallen
        self.dump_header(f, self.rdlen)
        optout_val = 1 if self.optout else 0
        f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
                (code_totext(self.hashalg, rdict_nsec3_algorithm),
                 optout_val, self.mbz, self.iterations))
        f.write('%02x %02x %04x\n' %
                (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
        f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
        f.write('%02x%s%s\n' % (self.saltlen,
                                ' ' if len(self.salt) > 0 else '',
                                encode_string(self.salt)))
        f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
        f.write('%02x%s%s\n' % (self.hashlen,
                                ' ' if len(self.hash) > 0 else '',
                                encode_string(self.hash)))
+
class RRSIG(RR):
    '''Implements rendering RRSIG RDATA in the test data format.

    Configurable parameters are as follows (see the description of the
    same name of attribute for the default value):
    - covered (int or string): The Type Covered field.  If specified
      as an integer, it must be the 16-bit RR type value of the
      covered type.  If specifed as a string, it must be the textual
      mnemonic of the type.
    - algorithm (int or string): The Algorithm field.  If specified
      as an integer, it must be the 8-bit algorithm number as defined
      in RFC4034.  If specifed as a string, it must be one of the keys
      of dict_algorithm (case insensitive).
    - labels (int): The Labels field.  If omitted (the corresponding
      variable being set to None), the number of labels of "signer"
      (excluding the trailing null label as specified in RFC4034) will
      be used.
    - originalttl (32-bit int): The Original TTL field.
    - expiration (32-bit int): The Expiration TTL field.
    - inception (32-bit int): The Inception TTL field.
    - tag (16-bit int): The Key Tag field.
    - signer (string): The Signer's Name field.  The string must be
      interpreted as a valid domain name.
    - signature (int): The Signature field.  Right now only a simple
      integer form is supported.  A prefix of "0" will be prepended if
      the resulting hexadecimal representation consists of an odd
      number of characters.
    '''

    covered = 'A'
    algorithm = 'RSASHA1'
    labels = None               # auto-calculate (#labels of signer)
    originalttl = 3600
    # NOTE(review): time.mktime() interprets the parsed tuple in the
    # host's local timezone, so these default epoch values differ
    # between machines -- confirm that is acceptable for generated data.
    expiration = int(time.mktime(datetime.strptime('20100131120000',
                                                   dnssec_timefmt).timetuple()))
    inception = int(time.mktime(datetime.strptime('20100101120000',
                                                  dnssec_timefmt).timetuple()))
    tag = 0x1035
    signer = 'example.com'
    signature = 0x123456789abcdef123456789abcdef

    def dump(self, f):
        name_wire = encode_name(self.signer)
        sig_wire = '%x' % self.signature
        if len(sig_wire) % 2 != 0:
            # pad to a whole number of octets
            sig_wire = '0' + sig_wire
        if self.rdlen is None:
            # 18 bytes of fixed fields plus signer name plus signature
            self.rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
        self.dump_header(f, self.rdlen)

        # Translate textual mnemonics into their numeric codes in place.
        if type(self.covered) is str:
            self.covered = dict_rrtype[self.covered.lower()]
        if type(self.algorithm) is str:
            self.algorithm = dict_algorithm[self.algorithm.lower()]
        if self.labels is None:
            self.labels = count_namelabels(self.signer)
        f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
                (code_totext(self.covered, rdict_rrtype),
                 code_totext(self.algorithm, rdict_algorithm), self.labels,
                 self.originalttl))
        f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
                                           self.labels, self.originalttl))
        f.write('# Expiration=%s, Inception=%s\n' %
                (str(self.expiration), str(self.inception)))
        f.write('%08x %08x\n' % (self.expiration, self.inception))
        f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
        f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
+
+class TSIG(RR):
+ '''Implements rendering TSIG RDATA in the test data format.
+
+ As a meta RR type TSIG uses some non common parameters. This
+ class overrides some of the default attributes of the RR class
+ accordingly:
+ - rr_class is set to 'ANY'
+ - rr_ttl is set to 0
+ Like other derived classes these can be overridden via the spec
+ file.
+
+ Other configurable parameters are as follows (see the description
+ of the same name of attribute for the default value):
+ - algorithm (string): The Algorithm Name field. The value is
+ generally interpreted as a domain name string, and will
+ typically be one of the standard algorithm names defined in
+ RFC4635. For convenience, however, a shortcut value "hmac-md5"
+ is allowed instead of the standard "hmac-md5.sig-alg.reg.int".
+ - time_signed (48-bit int): The Time Signed field.
+ - fudge (16-bit int): The Fudge field.
+ - mac_size (int): The MAC Size field. If omitted, the common value
+ determined by the algorithm will be used.
+ - mac (int or string): The MAC field. If specified as an integer,
+ the integer value is used as the MAC, possibly with prepended
+      0's so that the total length will be mac_size.  If specified as a
+ string, it is converted to a sequence of ascii codes and its
+ hexadecimal representation will be used. So, for example, if
+ "mac" is set to 'abc', it will be converted to '616263'. Note
+ that in this case the length of "mac" may not be equal to
+ mac_size. If unspecified, the mac_size number of '78' (ascii
+ code of 'x') will be used.
+ - original_id (16-bit int): The Original ID field.
+ - error (16-bit int): The Error field.
+ - other_len (int): The Other Len field.
+ - other_data (int or string): The Other Data field. This is
+ interpreted just like "mac" except that other_len is used
+ instead of mac_size. If unspecified this will be empty unless
+ the "error" is set to 18 (which means the "BADTIME" error), in
+ which case a hexadecimal representation of "time_signed + fudge
+ + 1" will be used.
+ '''
+
+ algorithm = 'hmac-sha256'
+ time_signed = 1286978795 # arbitrarily chosen default
+ fudge = 300
+ mac_size = None # use a common value for the algorithm
+ mac = None # use 'x' * mac_size
+ original_id = 2845 # arbitrarily chosen default
+ error = 0
+ other_len = None # 6 if error is BADTIME; otherwise 0
+ other_data = None # use time_signed + fudge + 1 for BADTIME
+ dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
+
+ # TSIG has some special defaults
+ def __init__(self):
+ super().__init__()
+ self.rr_class = 'ANY'
+ self.rr_ttl = 0
+
+ def dump(self, f):
+ if str(self.algorithm) == 'hmac-md5':
+ name_wire = encode_name('hmac-md5.sig-alg.reg.int')
+ else:
+ name_wire = encode_name(self.algorithm)
+ mac_size = self.mac_size
+ if mac_size is None:
+ if self.algorithm in self.dict_macsize.keys():
+ mac_size = self.dict_macsize[self.algorithm]
+ else:
+ raise RuntimeError('TSIG Mac Size cannot be determined')
+ mac = encode_string('x' * mac_size) if self.mac is None else \
+ encode_string(self.mac, mac_size)
+ other_len = self.other_len
+ if other_len is None:
+ # 18 = BADTIME
+ other_len = 6 if self.error == 18 else 0
+ other_data = self.other_data
+ if other_data is None:
+ other_data = '%012x' % (self.time_signed + self.fudge + 1) \
+ if self.error == 18 else ''
+ else:
+ other_data = encode_string(self.other_data, other_len)
+ if self.rdlen is None:
+ self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
+ len(other_data) / 2)
+ self.dump_header(f, self.rdlen)
+ f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
+ (self.algorithm, self.time_signed, self.fudge))
+ f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
+ f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
+ f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
+ f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
+ f.write('%04x %04x\n' % (self.original_id, self.error))
+ f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
+ f.write('%04x%s\n' % (other_len,
+ ' ' + other_data if len(other_data) > 0 else ''))
+
+# Build section-class mapping
+config_param = { 'name' : (Name, {}),
+ 'header' : (DNSHeader, header_xtables),
+ 'question' : (DNSQuestion, question_xtables),
+ 'edns' : (EDNS, {}) }
+for rrtype in dict_rrtype.keys():
+ # For any supported RR types add the tuple of (RR_CLASS, {}).
+ # We expect KeyError as not all the types are supported, and simply
+ # ignore them.
+ try:
+ cur_mod = sys.modules[__name__]
+ config_param[rrtype] = (cur_mod.__dict__[rrtype.upper()], {})
+ except KeyError:
+ pass
+
+def get_config_param(section):
+ s = section
+ m = re.match('^([^:]+)/\d+$', section)
+ if m:
+ s = m.group(1)
+ return config_param[s]
+
+usage = '''usage: %prog [options] input_file'''
+
+if __name__ == "__main__":
+ parser = OptionParser(usage=usage)
+ parser.add_option('-o', '--output', action='store', dest='output',
+ default=None, metavar='FILE',
+ help='output file name [default: prefix of input_file]')
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.error('input file is missing')
+ configfile = args[0]
+
+ outputfile = options.output
+ if not outputfile:
+ m = re.match('(.*)\.[^.]+$', configfile)
+ if m:
+ outputfile = m.group(1)
+ else:
+ raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
+
+ config = configparser.SafeConfigParser()
+ config.read(configfile)
+
+ output = open(outputfile, 'w')
+
+ print_header(output, configfile)
+
+ # First try the 'custom' mode; if it fails assume the query mode.
+ try:
+ sections = config.get('custom', 'sections').split(':')
+ except configparser.NoSectionError:
+ sections = ['header', 'question', 'edns']
+
+ for s in sections:
+ section_param = get_config_param(s)
+ (obj, xtables) = (section_param[0](), section_param[1])
+ if get_config(config, s, obj, xtables):
+ obj.dump(output)
+
+ output.close()
diff --git a/src/lib/util/python/pycppwrapper_util.h b/src/lib/util/python/pycppwrapper_util.h
index fd55c19..462e715 100644
--- a/src/lib/util/python/pycppwrapper_util.h
+++ b/src/lib/util/python/pycppwrapper_util.h
@@ -94,6 +94,22 @@ public:
/// the reference to be decreased, the original bare pointer should be
/// extracted using the \c release() method.
///
+/// In some other cases, it would be convenient if it's possible to create
+/// an "empty" container and reset it with a Python object later.
+/// For example, we may want to create a temporary Python object in the
+/// middle of a function and make sure that it's valid within the rest of
+/// the function scope, while we want to make sure its reference is released
+/// when the function returns (either normally or as a result of exception).
+/// To allow this scenario, this class defines the default constructor
+/// and the \c reset() method.  The default constructor creates the class
+/// object with an "empty" (NULL) Python object, while \c reset() allows
+/// the stored object to be replaced with a new one.  If a valid
+/// object was already set, \c reset() releases its reference.
+/// In general, it's safer to construct the container object with a valid
+/// Python object pointer. The use of the default constructor and
+/// \c reset() should therefore be restricted to cases where it's
+/// absolutely necessary.
+///
/// There are two convenience methods for commonly used operations:
/// \c installAsClassVariable() to add the PyObject as a class variable
/// and \c installToModule to add the PyObject to a specified python module.
@@ -166,16 +182,27 @@ public:
/// exception in a python binding written in C/C++.  See the code comment
/// of the method for more details.
struct PyObjectContainer {
+ PyObjectContainer() : obj_(NULL) {}
PyObjectContainer(PyObject* obj) : obj_(obj) {
if (obj_ == NULL) {
isc_throw(PyCPPWrapperException, "Unexpected NULL PyObject, "
"probably due to short memory");
}
}
- virtual ~PyObjectContainer() {
+ ~PyObjectContainer() {
+ if (obj_ != NULL) {
+ Py_DECREF(obj_);
+ }
+ }
+ void reset(PyObject* obj) {
+ if (obj == NULL) {
+ isc_throw(PyCPPWrapperException, "Unexpected NULL PyObject, "
+ "probably due to short memory");
+ }
if (obj_ != NULL) {
Py_DECREF(obj_);
}
+ obj_ = obj;
}
PyObject* get() {
return (obj_);
@@ -266,7 +293,7 @@ protected:
/// \c PyObject_New() to the caller.
template <typename PYSTRUCT, typename CPPCLASS>
struct CPPPyObjectContainer : public PyObjectContainer {
- CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
+ explicit CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
// This method associates a C++ object with the corresponding python
// object enclosed in this class.
diff --git a/src/lib/util/python/wrapper_template.cc b/src/lib/util/python/wrapper_template.cc
index 691e4bf..426ced5 100644
--- a/src/lib/util/python/wrapper_template.cc
+++ b/src/lib/util/python/wrapper_template.cc
@@ -210,7 +210,7 @@ namespace python {
// Most of the functions are not actually implemented and NULL here.
PyTypeObject @cppclass at _type = {
PyVarObject_HEAD_INIT(NULL, 0)
- "pydnspp. at CPPCLASS@",
+ "@MODULE at .@CPPCLASS@",
sizeof(s_ at CPPCLASS@), // tp_basicsize
0, // tp_itemsize
reinterpret_cast<destructor>(@CPPCLASS at _destroy), // tp_dealloc
@@ -222,7 +222,7 @@ PyTypeObject @cppclass at _type = {
NULL, // tp_as_number
NULL, // tp_as_sequence
NULL, // tp_as_mapping
- NULL, // tp_hash
+ NULL, // tp_hash
NULL, // tp_call
// THIS MAY HAVE TO BE CHANGED TO NULL:
@CPPCLASS at _str, // tp_str
@@ -299,8 +299,8 @@ initModulePart_ at CPPCLASS@(PyObject* mod) {
PyObject*
create at CPPCLASS@Object(const @CPPCLASS@& source) {
- @CPPCLASS at Container container =
- PyObject_New(s_ at CPPCLASS@, &@cppclass at _type);
+ @CPPCLASS at Container container(PyObject_New(s_ at CPPCLASS@,
+ &@cppclass at _type));
container.set(new @CPPCLASS@(source));
return (container.release());
}
diff --git a/src/lib/util/python/wrapper_template.h b/src/lib/util/python/wrapper_template.h
index d68a658..be701e1 100644
--- a/src/lib/util/python/wrapper_template.h
+++ b/src/lib/util/python/wrapper_template.h
@@ -37,15 +37,15 @@ bool initModulePart_ at CPPCLASS@(PyObject* mod);
// Note: this utility function works only when @CPPCLASS@ is a copy
// constructable.
// And, it would only be useful when python binding needs to create this
-// object frequently. Otherwise, it would (or should) probably better to
+// object frequently. Otherwise, it would (or should) probably be better to
// remove the declaration and definition of this function.
//
-/// This is A simple shortcut to create a python @CPPCLASS@ object (in the
+/// This is a simple shortcut to create a python @CPPCLASS@ object (in the
/// form of a pointer to PyObject) with minimal exception safety.
/// On success, it returns a valid pointer to PyObject with a reference
/// counter of 1; if something goes wrong it throws an exception (it never
/// returns a NULL pointer).
-/// This function is expected to be called with in a try block
+/// This function is expected to be called within a try block
/// followed by necessary setup for python exception.
PyObject* create at CPPCLASS@Object(const @CPPCLASS@& source);
diff --git a/src/lib/util/strutil.cc b/src/lib/util/strutil.cc
index 161f9ac..ed7fc9b 100644
--- a/src/lib/util/strutil.cc
+++ b/src/lib/util/strutil.cc
@@ -132,6 +132,17 @@ format(const std::string& format, const std::vector<std::string>& args) {
return (result);
}
+std::string
+getToken(std::istringstream& iss) {
+ string token;
+ iss >> token;
+ if (iss.bad() || iss.fail()) {
+ isc_throw(StringTokenError, "could not read token from string");
+ }
+ return (token);
+}
+
+
} // namespace str
} // namespace util
} // namespace isc
diff --git a/src/lib/util/strutil.h b/src/lib/util/strutil.h
index e044c15..021c236 100644
--- a/src/lib/util/strutil.h
+++ b/src/lib/util/strutil.h
@@ -18,7 +18,10 @@
#include <algorithm>
#include <cctype>
#include <string>
+#include <sstream>
#include <vector>
+#include <exceptions/exceptions.h>
+#include <boost/lexical_cast.hpp>
namespace isc {
namespace util {
@@ -26,6 +29,16 @@ namespace str {
/// \brief A Set of C++ Utilities for Manipulating Strings
+///
+/// \brief A standard string util exception that is thrown if getToken or
+/// tokenToNum are called with bad input data
+///
+class StringTokenError : public Exception {
+public:
+ StringTokenError(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
/// \brief Normalize Backslash
///
/// Only relevant to Windows, this replaces all "\" in a string with "/" and
@@ -140,6 +153,55 @@ std::string format(const std::string& format,
const std::vector<std::string>& args);
+/// \brief Returns one token from the given stringstream
+///
+/// Using the >> operator, with basic error checking
+///
+/// \exception StringTokenError if the token cannot be read from the stream
+///
+/// \param iss stringstream to read one token from
+///
+/// \return the first token read from the stringstream
+std::string getToken(std::istringstream& iss);
+
+/// \brief Converts a string token to an *unsigned* integer.
+///
+/// The value is converted using a lexical cast, with error and bounds
+/// checking.
+///
+/// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
+/// wide to store resulting integers.
+///
+/// BitSize is the maximum number of bits that the resulting integer can take.
+/// This function first checks whether the given token can be converted to
+/// an integer of NumType type. It then confirms the conversion result is
+/// within the valid range, i.e., [0, 2^BitSize - 1]. The second check is
+/// necessary because lexical_cast<T> where T is an unsigned integer type
+/// doesn't correctly reject negative numbers when compiled with SunStudio.
+///
+/// \exception StringTokenError if the value is out of range, or if it
+/// could not be converted
+///
+/// \param num_token the string token to convert
+///
+/// \return the converted value, of type NumType
+template <typename NumType, int BitSize>
+NumType
+tokenToNum(const std::string& num_token) {
+ NumType num;
+ try {
+ num = boost::lexical_cast<NumType>(num_token);
+ } catch (const boost::bad_lexical_cast& ex) {
+ isc_throw(StringTokenError, "Invalid SRV numeric parameter: " <<
+ num_token);
+ }
+ if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
+ isc_throw(StringTokenError, "Numeric SRV parameter out of range: " <<
+ num);
+ }
+ return (num);
+}
+
} // namespace str
} // namespace util
} // namespace isc
diff --git a/src/lib/util/tests/filename_unittest.cc b/src/lib/util/tests/filename_unittest.cc
index 33e6456..07f3525 100644
--- a/src/lib/util/tests/filename_unittest.cc
+++ b/src/lib/util/tests/filename_unittest.cc
@@ -51,42 +51,49 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/alpha/beta/", fname.directory());
EXPECT_EQ("gamma", fname.name());
EXPECT_EQ(".delta", fname.extension());
+ EXPECT_EQ("gamma.delta", fname.nameAndExtension());
// Directory only
fname.setName("/gamma/delta/");
EXPECT_EQ("/gamma/delta/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Filename only
fname.setName("epsilon");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("epsilon", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("epsilon", fname.nameAndExtension());
// Extension only
fname.setName(".zeta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".zeta", fname.extension());
+ EXPECT_EQ(".zeta", fname.nameAndExtension());
// Missing directory
fname.setName("eta.theta");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("eta", fname.name());
EXPECT_EQ(".theta", fname.extension());
+ EXPECT_EQ("eta.theta", fname.nameAndExtension());
// Missing filename
fname.setName("/iota/.kappa");
EXPECT_EQ("/iota/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".kappa", fname.extension());
+ EXPECT_EQ(".kappa", fname.nameAndExtension());
// Missing extension
fname.setName("lambda/mu/nu");
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Check that the decomposition can occur in the presence of leading and
// trailing spaces
@@ -94,18 +101,21 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("lambda/mu/", fname.directory());
EXPECT_EQ("nu", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("nu", fname.nameAndExtension());
// Empty string
fname.setName("");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// ... and just spaces
fname.setName(" ");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
// Check corner cases - where separators are present, but strings are
// absent.
@@ -113,16 +123,19 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ("", fname.extension());
+ EXPECT_EQ("", fname.nameAndExtension());
fname.setName(".");
EXPECT_EQ("", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
fname.setName("/.");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ("", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(".", fname.nameAndExtension());
// Note that the space is a valid filename here; only leading and trailing
// spaces should be trimmed.
@@ -130,11 +143,13 @@ TEST_F(FilenameTest, Components) {
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
fname.setName(" / . ");
EXPECT_EQ("/", fname.directory());
EXPECT_EQ(" ", fname.name());
EXPECT_EQ(".", fname.extension());
+ EXPECT_EQ(" .", fname.nameAndExtension());
}
// Check that the expansion with a default works.
@@ -177,3 +192,40 @@ TEST_F(FilenameTest, UseAsDefault) {
EXPECT_EQ("/s/t/u", fname.useAsDefault("/s/t/u"));
EXPECT_EQ("/a/b/c", fname.useAsDefault(""));
}
+
+TEST_F(FilenameTest, setDirectory) {
+ Filename fname("a.b");
+ EXPECT_EQ("", fname.directory());
+ EXPECT_EQ("a.b", fname.fullName());
+ EXPECT_EQ("a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir/");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/");
+ EXPECT_EQ("/", fname.directory());
+ EXPECT_EQ("/a.b", fname.fullName());
+ EXPECT_EQ("/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("");
+ EXPECT_EQ("", fname.directory());
+ EXPECT_EQ("a.b", fname.fullName());
+ EXPECT_EQ("a.b", fname.expandWithDefault(""));
+
+ fname = Filename("/first/a.b");
+ EXPECT_EQ("/first/", fname.directory());
+ EXPECT_EQ("/first/a.b", fname.fullName());
+ EXPECT_EQ("/first/a.b", fname.expandWithDefault(""));
+
+ fname.setDirectory("/just/some/dir");
+ EXPECT_EQ("/just/some/dir/", fname.directory());
+ EXPECT_EQ("/just/some/dir/a.b", fname.fullName());
+ EXPECT_EQ("/just/some/dir/a.b", fname.expandWithDefault(""));
+}
diff --git a/src/lib/util/tests/strutil_unittest.cc b/src/lib/util/tests/strutil_unittest.cc
index cd3a9ca..74bc17d 100644
--- a/src/lib/util/tests/strutil_unittest.cc
+++ b/src/lib/util/tests/strutil_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <stdint.h>
+
#include <string>
#include <gtest/gtest.h>
@@ -22,17 +24,9 @@ using namespace isc;
using namespace isc::util;
using namespace std;
-class StringUtilTest : public ::testing::Test {
-protected:
- StringUtilTest()
- {
- }
-};
-
-
// Check for slash replacement
-TEST_F(StringUtilTest, Slash) {
+TEST(StringUtilTest, Slash) {
string instring = "";
isc::util::str::normalizeSlash(instring);
@@ -49,7 +43,7 @@ TEST_F(StringUtilTest, Slash) {
// Check that leading and trailing space trimming works
-TEST_F(StringUtilTest, Trim) {
+TEST(StringUtilTest, Trim) {
// Empty and full string.
EXPECT_EQ("", isc::util::str::trim(""));
@@ -71,7 +65,7 @@ TEST_F(StringUtilTest, Trim) {
// returned vector; if not as expected, the following references may be invalid
// so should not be used.
-TEST_F(StringUtilTest, Tokens) {
+TEST(StringUtilTest, Tokens) {
vector<string> result;
// Default delimiters
@@ -157,7 +151,7 @@ TEST_F(StringUtilTest, Tokens) {
// Changing case
-TEST_F(StringUtilTest, ChangeCase) {
+TEST(StringUtilTest, ChangeCase) {
string mixed("abcDEFghiJKLmno123[]{=+--+]}");
string upper("ABCDEFGHIJKLMNO123[]{=+--+]}");
string lower("abcdefghijklmno123[]{=+--+]}");
@@ -173,7 +167,7 @@ TEST_F(StringUtilTest, ChangeCase) {
// Formatting
-TEST_F(StringUtilTest, Formatting) {
+TEST(StringUtilTest, Formatting) {
vector<string> args;
args.push_back("arg1");
@@ -213,3 +207,63 @@ TEST_F(StringUtilTest, Formatting) {
string format9 = "%s %s";
EXPECT_EQ(format9, isc::util::str::format(format9, args));
}
+
+TEST(StringUtilTest, getToken) {
+ string s("a b c");
+ istringstream ss(s);
+ EXPECT_EQ("a", isc::util::str::getToken(ss));
+ EXPECT_EQ("b", isc::util::str::getToken(ss));
+ EXPECT_EQ("c", isc::util::str::getToken(ss));
+ EXPECT_THROW(isc::util::str::getToken(ss), isc::util::str::StringTokenError);
+}
+
+int32_t tokenToNumCall_32_16(const string& token) {
+ return isc::util::str::tokenToNum<int32_t, 16>(token);
+}
+
+int16_t tokenToNumCall_16_8(const string& token) {
+ return isc::util::str::tokenToNum<int16_t, 8>(token);
+}
+
+TEST(StringUtilTest, tokenToNum) {
+ uint32_t num32 = tokenToNumCall_32_16("0");
+ EXPECT_EQ(0, num32);
+ num32 = tokenToNumCall_32_16("123");
+ EXPECT_EQ(123, num32);
+ num32 = tokenToNumCall_32_16("65535");
+ EXPECT_EQ(65535, num32);
+
+ EXPECT_THROW(tokenToNumCall_32_16(""),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("a"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("-1"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("65536"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("1234567890"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_32_16("-1234567890"),
+ isc::util::str::StringTokenError);
+
+ uint16_t num16 = tokenToNumCall_16_8("123");
+ EXPECT_EQ(123, num16);
+ num16 = tokenToNumCall_16_8("0");
+ EXPECT_EQ(0, num16);
+ num16 = tokenToNumCall_16_8("255");
+ EXPECT_EQ(255, num16);
+
+ EXPECT_THROW(tokenToNumCall_16_8(""),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("a"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("-1"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("256"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("1234567890"),
+ isc::util::str::StringTokenError);
+ EXPECT_THROW(tokenToNumCall_16_8("-1234567890"),
+ isc::util::str::StringTokenError);
+
+}
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..49ef0f1 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
status=0
n=0
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
echo "I:Checking b10-auth is working by default ($n)"
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
@@ -40,8 +44,8 @@ echo 'Stats show
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# the server should have received 1 UDP and 1 TCP queries (TCP query was
# sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -73,8 +77,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -97,8 +101,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
index 17c3d4a..d23d103 100755
--- a/tests/system/cleanall.sh
+++ b/tests/system/cleanall.sh
@@ -27,7 +27,10 @@ find . -type f \( \
status=0
-for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+for d in ./.* ./*
do
+ case $d in ./.|./..) continue ;; esac
+ test -d $d || continue
+
test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
done
diff --git a/tools/system_messages.py b/tools/system_messages.py
index 6cf3ce9..7b0d60c 100644
--- a/tools/system_messages.py
+++ b/tools/system_messages.py
@@ -58,6 +58,12 @@ SEC_HEADER="""<?xml version="1.0" encoding="UTF-8"?>
<!ENTITY % version SYSTEM "version.ent">
%version;
]>
+<!--
+ This XML document is generated using the system_messages.py tool
+ based on the .mes message files.
+
+ Do not edit this file.
+-->
<book>
<?xml-stylesheet href="bind10-guide.css" type="text/css"?>
More information about the bind10-changes
mailing list