BIND 10 trac1206, updated. bb444bae93e8e87d1e62214b1819fb73fd7634e4 Merge branch 'master' of ssh://git.bind10.isc.org/var/bind10/git/bind10
BIND 10 source code commits
bind10-changes at lists.isc.org
Mon Oct 3 09:58:25 UTC 2011
The branch, trac1206 has been updated
via bb444bae93e8e87d1e62214b1819fb73fd7634e4 (commit)
via 8fe024cd171ecf1610419abb70e5d613b94ba5a0 (commit)
via feeddd7e5b966c9445fc4ac97a6526fa792413cd (commit)
via 85e4dfa61bf440c132f4ce6bc73130bc6e91719c (commit)
via 054699635affd9c9ecbe7a108d880829f3ba229e (commit)
via d04acfb82c3425a638f09d2f49208ef86bc7a6b3 (commit)
via 434f4fd17dd3dee1d17e7b2e008f1ab1416d5799 (commit)
via ce8b5fe9567f06f7acba34b9e9b35ad471e2ab67 (commit)
via 34ead9dfeff5f64af36a209cae28075fcbbb3330 (commit)
via fcfe5af9c22c5b666e5ecf646bbe0d9da7b655e9 (commit)
via 1f967a8ffe37f6732dd628d28a13abc442541c38 (commit)
via 3efca5f9b7b7bfeac53044fdd44e5add61397157 (commit)
via a35b62699480e149f22f4e039935bfcf41f97ac2 (commit)
via 9dedc72e89b9ca8ba2c5f3bc562ad9ccd1aa05b0 (commit)
via 7808524aa9bbb424327ac67d7408647cb18840f5 (commit)
via 5b866ef26bd5ae980bb86c494a592ef232552b68 (commit)
via a5387c15e93c6d1925bf4ad0eacdcfd63790c32a (commit)
via d56c782197242e32ccdd23c9e3652ff520f3d58f (commit)
via bd8cb42b61666342ee8bc6c33aed2a168301ff67 (commit)
via 9accf90bb081b057023479f0a86e54017b02cdd3 (commit)
via 9eafb04ee8dbd47022dd9a5e5c1310f88f398d2c (commit)
via 7af1aeddc36a1ac1343f1af12aa29164f1028f03 (commit)
via 15f5d7895a2744376062229cf19593016a773cde (commit)
via ddec42c7a23cca11903ece8f7ab614dcc7e5edd3 (commit)
via d8cac904c7aea4a652a47afb35aceb6ca4808ce8 (commit)
via 433381e5ca62418fc90377d16f1805260b27b619 (commit)
via c8bbdd1d74ac313d8b57d8debe4f7b75490e5df2 (commit)
via e57c5196d3e8dd56b0190799c98b56a5be55333a (commit)
via 06f7bc4b3b69e8fda96f6e626a7dac5b1fbbb233 (commit)
via 0aa4c14ebd1eb0a68c2bcf5c617325596657ea71 (commit)
via 9daa2f686b3bdb03b13e9becf45a722344888cf3 (commit)
via f159ac66aa577889514dc170c87a92c49be5a6cc (commit)
via d6b86a88c7a486f2e5b742fc60d374e48382320e (commit)
via 5ddc441f77a34158039f0328c3ab7c2106b7b3b8 (commit)
via 290e89c515e051dad269f1acbce0b52a541d9c8c (commit)
via 9b8925a4d0ecbd8a09d307dfd56fa15fb8eedcc6 (commit)
via 53314ecb63f3f0f85629b66a228207658d8fd73f (commit)
via 863509e2dc3bf96fd38476d787abb62e0da46624 (commit)
via fe1d6665faf06b3fcc0aaf8ec72905aa4b7ce1f7 (commit)
via 7581a21a7dce1dc6b92ad24293b4269a3531e6d4 (commit)
via 1fd37ae8a4bb25a6e85ffb2158b2ae95fe8cbd04 (commit)
via 8ed3b760c179df435882f2ad96b6dcfad5b6e9fa (commit)
via 3516ab551851273faeeb0b8696695e5f3ffc88f9 (commit)
via 9f8ddd6ee1b73c9403f85b6ef5c85605ca393aa7 (commit)
via 898485cd30084d478e8be688151cd11fb4d492a7 (commit)
via 30f4856101bf23ce155ef0f2ebd1ca6f034d2420 (commit)
via eb4be17ddf3b26c379e3f100cf8e8b0fd4329537 (commit)
via ac06a06d1df9a1cc905b224b79921b0d0ade4c05 (commit)
via 611d0300fb8bb2e87d787023cb5c6030ee07d8d2 (commit)
via fdf02d580f2bb1fbc6fa85ee0edd81a07404d1de (commit)
via a0bb482b46bd05f8c8774bacdd26dc891cb3bef7 (commit)
via cebd7e3562312ade50d972af49239cee7f10d057 (commit)
via 8750dc3ab772e29d7374d779cefb3c8b8c61d2d1 (commit)
via b743e6ba98c8cbb53c45e1c0f59e5a78ba62f5d4 (commit)
via 6556a2ffdd7bdb5370c2f1b3d8c9e8799ef82140 (commit)
via 3e9189a483c0f53eba4f05092c90f7955123f52c (commit)
via 7f5702a379516cee041129c03dd37d67f26d49c1 (commit)
via e60ecc91ad65087c3cff3af479cc455abccbe020 (commit)
via 62bd7736311e166aea3604b8e486b58c1315f82f (commit)
via 9687077033661cf07b6ea2e966299e837a501612 (commit)
via 703d5f36d0102993f311d21e662a28492d8cf7b4 (commit)
via 84d9095c66c765cf78814323597b2e3bbef293d5 (commit)
via e54bc83c4e8a66fd9ab1ae9f27899d70ef82a066 (commit)
via 1a8c86ea2503bffe6dc1f2300dfc2b4efba108cc (commit)
via ed5311a26b7b1368f28191c405ec13da907213ae (commit)
via 493a6449b37b34ac5fe36257b266c229e34d105c (commit)
via 6f6a4cf9d98f2b4550e0949da1e20a7f38440610 (commit)
via 36a53f41a7da580926111dca65652d6389fcd909 (commit)
via 61681dac2023240a4a029072add3a39809ccb7f0 (commit)
via 96dd4d2daf1fb91672a798fa478da0ec8a7ac737 (commit)
via 9354737244e0bb7c22ec684ed652c89991eca913 (commit)
via 9bbc77b6b8381c9a6d831e490a7715ba84b9356f (commit)
via 8023760a5fc6f346cf82340aa50df755b0d0d00a (commit)
via cc0d6e4674fd2e6ebe3775a28ec87fc5c869f924 (commit)
via f9cb0d187f02078b27a0119ce42c83f62461a507 (commit)
via 4cde36d2b97a24f03c192a61248545d0180fb856 (commit)
via c874cb056e2a5e656165f3c160e1b34ccfe8b302 (commit)
via 12fd115d2e1ea8b55f43313ac665c32e07f9498e (commit)
via 84ada921a2fe98489b578b6d780c1ad2e6c31482 (commit)
via 8c838cf57adef3c004b910b086513d9620147692 (commit)
via 1378551aa74712c929a79964ae18d9962ce73787 (commit)
via bb7833f2054edca11a32d24d17486f153db00ec1 (commit)
via c430e464860b4460a0ab32454e53918a1cc7444b (commit)
via 39e529c506a4350cd676bf5ddff6d61686e8814f (commit)
via aba10a01b765b472d57112fd4e09a6fb47b49fa7 (commit)
via 9688dee697e9ad279c6542bf164b820e907e526f (commit)
via c1a72c46b572eee2d94ab53a5589c724fcb1fcf1 (commit)
via 9016513b4d19d2781d0b6f2575b490431e04ec79 (commit)
via 13e8bc43e4888fe9e6df7e536ea0b439c6351199 (commit)
via e89895b7e5f3b7074271c89de281e426c53be347 (commit)
via 938f4e9ba14954551fbc390abb7d1e06d38189c2 (commit)
via b0b0da67c915f3c02020397b8dcf6a078a9b3a90 (commit)
via 1ee8ad4a2b092a6edc35c111c5a3b5b761da0dae (commit)
via c943619d223be1158ae8db5223f655343d06785f (commit)
via 0d874a95d3c782b9c663c64be619f449956df457 (commit)
via 2d325650009f46a1f16ef2e7c1f4ed0827db236f (commit)
via abe73e885b980aace1fd0df492fa321bdd35f01f (commit)
via 53d45f54e33d23a5b4df42dc977a3a6ab597f5c5 (commit)
via 338b54ef4631f0d35601f174eabfa10f1541f46d (commit)
via 698176eccd5d55759fe9448b2c249717c932ac31 (commit)
via 41cbf5a91bdfa0b311aade6b05d2f51f59cce978 (commit)
via d845ae918fe8dce6806c3f927a7c101fc0e2173d (commit)
via 7bc93774a449b3f19748a37186db3efcd3d6c537 (commit)
via d5a58bbe641d32257035a6087f18655e7b66d8fd (commit)
via c64c4730852f74fff8ea75730e0b40cd3b23a85e (commit)
via c69a1675dd0434db0b99682d14fa7905fcd3af8f (commit)
via 4a605525cda67bea8c43ca8b3eae6e6749797450 (commit)
via b2d2acebebc66495b98eef634ce633eb70cc2411 (commit)
via acb299784ddbf280aac6ee5a78977c9acbf1fd32 (commit)
via 2418922a1389bbf265b02328f7c4f594257c4026 (commit)
via 44a44c0b568dc997e7522292212e0ef02b522f3d (commit)
via 250ce2abb3d6b48fce778b5e0c651d57582aff7c (commit)
via 99be45a44f97942f9327b16aff368f1650994e0e (commit)
via 7592596f7a9f8dce2e5e8d9311cc40c5199c66e3 (commit)
via c24c42a5e29444313efee6528f172ad66452050d (commit)
via 05eaa177051b212669c2a7b9e2194c3e9ba47f14 (commit)
via 9797d47ab90761c50020f78d5a55fb2672ffd7c0 (commit)
via 000164d51a974acf3846a6b0a7795f484e915161 (commit)
via 7dfa14ccdb6777ccacb99fe0d716b7d63654426f (commit)
via f0ff0a2f69bcfae3e2a30a3bdeae37b475ae9106 (commit)
via 38816f95cc01f1c7aeec1d42bde3febb308dd98f (commit)
via 0f8868d1ed7d479d05e2a70de67897d133d41ef9 (commit)
via bc03b37015ab6ea23cbec70dbd299c74fb001aba (commit)
via e56e0f7d1ad206f1ebc26e285d82a8e7ff6390e1 (commit)
via 7d2b0148161460b928cf39c7c2969d95d2870d9c (commit)
via 40cd22fc64c7755efe60cd42cb12851cf3de55a4 (commit)
via ed8d686171f140fd12164d2d34f65b4ab3c97645 (commit)
via 1e32824c93dac7e406d1b35449b42700bf854679 (commit)
via c5d5522f83888a8b442aa7ff17738f3f688749fe (commit)
via 688867daa34ade5075443c77535f80e1d2d76743 (commit)
via d36ded7d95a695f0412f6ccdb59bf55fc600e9d3 (commit)
via b8e90124c19177e0b6b33bd624e244860e2424b3 (commit)
via 5cf1b7ab58c42675c1396fbbd5b1aaf037eb8d19 (commit)
via 17d9827aa40e363650d1698fddba9204f27b5171 (commit)
via 27f447c8b054b17d96abfba431568c1ffe017f0a (commit)
via 219818389cc848dc2d67aff732b9790968851b51 (commit)
via e602f86dae29c62619b0ea8bf2ca69e1ce1b8295 (commit)
via 57f7044d690d38cff90487b5883883a674d2589f (commit)
via 383b6b2891226228ddf3cfd4c3dd8b17ea186b8a (commit)
via 8cc8f4c008f640b7f13f8f1160261275ec14475b (commit)
via 70bba1b3f811261fcef30694568245e83cd64bc5 (commit)
via 6c5f8867a45f40411594372bca09c04ddf5c0002 (commit)
via f1fef139dbc592aa4c7071d47e38e14487ab72e7 (commit)
via 2c8b76ed408547789f2e26ad76773e40e316a392 (commit)
via eefa62a767ec09c20d679876842e15e9d3742499 (commit)
via 58845974d57ee0cd0b261b00d1ededccc7bde105 (commit)
via d49e3c5e79e00b59e518c4bc1f71882adf721696 (commit)
via 06a24c688282b61dd2ce5b6c00608bee34ae3563 (commit)
via b902e70583a9dfb1ee410e297e2da4c8b944ba8d (commit)
via 09349cf206ee9e68618713b97e621b7ef2a6c0a9 (commit)
via ff1bd2a00278bc753a7d035fd5020ff936df1882 (commit)
via c89f3a2f43fd7fe70bcb199fad0ccf94364b1ebe (commit)
via 4c86025464db4603ec07490169aaf4b77868057b (commit)
via 9b4326dc093b71bcd77a527111ea6778795bf068 (commit)
via 2c5b2fc19c21dd12747eb960baee65759847a118 (commit)
via 0aa89cf84c78a9ee8b97a51c17b3982324021f81 (commit)
via d9dd4c5a7438c152f6c9ae2bcc4c9f5ee598728b (commit)
via 03da93322b956e003882c09a8d4ea949f790dbc4 (commit)
via bfa93c0ee79935bf37d379065e219ba0afb0c4e3 (commit)
via 7a061c2e82d62e2b275cb5a8d7460dce7d36f050 (commit)
via a6cbb14cc9c986d109983087313225829f1c91fe (commit)
via 7cc32b7915532354ed7e2fd15f7ca5a9b9b64610 (commit)
via dd340b32df88083fdc17f682094b451f7dcdf6d6 (commit)
via 30c277567f64d09c11cadcb173eef066efdaea07 (commit)
via ec2793914d1090db8c8d94a2f9b92ed97b1a6cba (commit)
via a59c7f28a458842b4edce2d6639639b17a85eb9f (commit)
via ccb4c0aa696918c579a0b80448fc93606152ec93 (commit)
via 0fa8006ade38ac7206ff57934f3bb866be6407a2 (commit)
via b25df34f6a7582baff54dab59c4e033f6db4e42c (commit)
via 4e544fba3459913e23f86dc5e628665bd288c483 (commit)
via 259955ba65c102bd36ec818ca4193aab311e983d (commit)
via 1f81b4916fa3bd0cbf4f41cc7ad8f13450aa6481 (commit)
via 6d6353cea42ed088df3c2c90c4c2741a1b8b2871 (commit)
via 7efa61c40b94d3234dd7fc79a0fc7ae0f1b0a105 (commit)
via 5c3a7ca7b3b28a7a163b0af3cbadc3d8fe7a702b (commit)
via 54c6127e005c8e3dd82cd97d49aca23f5a5d8029 (commit)
via b6261f09b53af42a26d88fd50d74ab1e84524cce (commit)
via 8634aa9cab1c2205629540b4d99b88847148bd80 (commit)
via d1a1871cc6c93ababba62f42bcab5205320b8867 (commit)
via 2a5c5383e3df0e625367bf85b740f62bf777b211 (commit)
via f16de89251e4607eb413df666a64022c50478a4c (commit)
via 4e93ba217318854742144bf0b8e30f4c3614db92 (commit)
via 38d80ef7186ac2b18ed234a825894f5f78fc90b1 (commit)
via 88bee2515653d3b5481608bc92a1956c7ea7cf48 (commit)
via e9286ce511be095f2b16b1b7bc503b1e4377593d (commit)
via 723a6d1f333f1d513d5e4fe26a8ee7611767c9fc (commit)
via 88fe1bafce118f40d256097c2bfbdf9e53553784 (commit)
via 51c4b53945599a72d550d7380c7107e11b467d5c (commit)
via 84d7ae48d44e055cb16e3900cf2c4b2262f6a6da (commit)
via 61aaae27e12db2a00cfde674931e5080e733e6b3 (commit)
via 59e2ceaf7b75c38391c518436a70ac3d41b8c8be (commit)
via 4e3c6c5e5b19be3a0f970a06e3e135d1b2fae668 (commit)
via 03e9f45f8a6584a373f1bd15f01f56d9296c842a (commit)
via cb4d8443645a5c3e973b4e2477198686d8d8c507 (commit)
via f847a5e079ceae0346b84fb320ed06ce9b443a63 (commit)
via 05512e090c6c3cb852cebdb85ae7c12e8001603b (commit)
via c35f6b15bb6b703154e05399266dd2051ef9cfa9 (commit)
via 3f2864bf1271ca525858cf3e1fa641e3496eec59 (commit)
via f8720ba467d8e107c512160a5502caf9be58a425 (commit)
via 38af8a4225e8c82564758e8a5629da438220bc87 (commit)
via c5e0db2b7d8fbdb13548e01310f623f131ea0e9c (commit)
via 26c7bfe851f00422beb442a77d25cc0887557b79 (commit)
via f5239632a06383f2b4f6825cb6a006ceb8bea417 (commit)
via 680f05c35753bf1f70392d25b1e6310cf46476ce (commit)
via b12351c21ee92a13536aa89331cc73bd166dbe5f (commit)
via 2e1dceedf6a4f661a8d7e57757b28f9f6cb1a9b3 (commit)
via df69ad0d0231218610f68ecb2b1953ae7f28fa68 (commit)
via 5b713ea8e5fd35fdb1ab7ff953e010ef9b60f98c (commit)
from fdf1c88a53f5970aa4e6d55da42303ce7d4730f7 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit bb444bae93e8e87d1e62214b1819fb73fd7634e4
Merge: feeddd7e5b966c9445fc4ac97a6526fa792413cd 8fe024cd171ecf1610419abb70e5d613b94ba5a0
Author: Jelte Jansen <jelte at isc.org>
Date: Mon Oct 3 11:33:16 2011 +0200
Merge branch 'master' of ssh://git.bind10.isc.org/var/bind10/git/bind10
commit feeddd7e5b966c9445fc4ac97a6526fa792413cd
Merge: 8c838cf57adef3c004b910b086513d9620147692 85e4dfa61bf440c132f4ce6bc73130bc6e91719c
Author: Jelte Jansen <jelte at isc.org>
Date: Mon Oct 3 10:14:13 2011 +0200
Merge branch 'master' of ssh://git.bind10.isc.org/var/bind10/git/bind10
commit 8c838cf57adef3c004b910b086513d9620147692
Author: Jelte Jansen <jelte at isc.org>
Date: Fri Sep 30 16:03:51 2011 +0200
[master] one additional fix for #1206
Pending #1253, the wrappers from #1179 also need a Makefile workaround to have access to the currently hardcoded sqlite3 backend, which has now been moved to a dynamically loaded library and is no longer part of libdatasrc.
commit 1378551aa74712c929a79964ae18d9962ce73787
Merge: c430e464860b4460a0ab32454e53918a1cc7444b fdf1c88a53f5970aa4e6d55da42303ce7d4730f7
Author: Jelte Jansen <jelte at isc.org>
Date: Fri Sep 30 15:12:10 2011 +0200
[master] Merge branch 'trac1206'
Conflicts:
src/lib/datasrc/sqlite3_accessor.cc
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 40 +
configure.ac | 13 +-
doc/guide/bind10-guide.html | 30 +-
doc/guide/bind10-guide.xml | 30 +-
src/bin/auth/auth_messages.mes | 3 +
src/bin/auth/auth_srv.cc | 24 +
src/bin/auth/query.cc | 7 +
src/bin/auth/statistics.cc | 32 +-
src/bin/auth/statistics.h | 20 +
src/bin/auth/tests/query_unittest.cc | 4 +
src/bin/auth/tests/statistics_unittest.cc | 74 ++-
src/bin/bind10/bind10_messages.mes | 4 +
src/bin/bind10/bind10_src.py.in | 35 +-
src/bin/bind10/run_bind10.sh.in | 4 +-
src/bin/bind10/tests/Makefile.am | 2 +-
src/bin/bind10/tests/bind10_test.py.in | 28 +-
src/bin/bindctl/run_bindctl.sh.in | 2 +-
src/bin/bindctl/tests/Makefile.am | 2 +-
src/bin/cfgmgr/plugins/tests/Makefile.am | 2 +-
src/bin/cfgmgr/tests/Makefile.am | 2 +-
src/bin/cmdctl/run_b10-cmdctl.sh.in | 10 +-
src/bin/cmdctl/tests/Makefile.am | 2 +-
src/bin/dhcp6/tests/Makefile.am | 2 +-
src/bin/host/b10-host.1 | 4 -
src/bin/host/b10-host.xml | 5 -
src/bin/loadzone/run_loadzone.sh.in | 2 +-
src/bin/loadzone/tests/correct/Makefile.am | 2 +-
src/bin/loadzone/tests/error/Makefile.am | 2 +-
src/bin/msgq/tests/Makefile.am | 2 +-
src/bin/stats/Makefile.am | 4 +-
src/bin/stats/b10-stats-httpd.8 | 6 +-
src/bin/stats/b10-stats-httpd.xml | 10 +-
src/bin/stats/b10-stats.8 | 4 -
src/bin/stats/b10-stats.xml | 6 -
src/bin/stats/stats-httpd-xsl.tpl | 1 +
src/bin/stats/stats-schema.spec | 86 --
src/bin/stats/stats.py.in | 589 +++++-----
src/bin/stats/stats.spec | 71 +-
src/bin/stats/stats_httpd.py.in | 280 +++---
src/bin/stats/stats_messages.mes | 21 +-
src/bin/stats/tests/Makefile.am | 12 +-
src/bin/stats/tests/b10-stats-httpd_test.py | 782 ++++++++-----
src/bin/stats/tests/b10-stats_test.py | 1197 ++++++++++----------
src/bin/stats/tests/fake_select.py | 43 -
src/bin/stats/tests/fake_socket.py | 70 --
src/bin/stats/tests/fake_time.py | 47 -
src/bin/stats/tests/http/Makefile.am | 6 -
src/bin/stats/tests/http/server.py | 96 --
src/bin/stats/tests/isc/Makefile.am | 8 -
src/bin/stats/tests/isc/cc/Makefile.am | 7 -
src/bin/stats/tests/isc/cc/__init__.py | 1 -
src/bin/stats/tests/isc/cc/session.py | 156 ---
src/bin/stats/tests/isc/config/Makefile.am | 7 -
src/bin/stats/tests/isc/config/__init__.py | 1 -
src/bin/stats/tests/isc/config/ccsession.py | 249 ----
src/bin/stats/tests/isc/log/__init__.py | 33 -
src/bin/stats/tests/isc/util/Makefile.am | 7 -
src/bin/stats/tests/isc/util/process.py | 21 -
src/bin/stats/tests/test_utils.py | 364 ++++++
src/bin/stats/tests/testdata/Makefile.am | 1 -
src/bin/stats/tests/testdata/stats_test.spec | 19 -
src/bin/tests/Makefile.am | 4 +-
src/bin/xfrin/tests/Makefile.am | 2 +-
src/bin/xfrin/tests/xfrin_test.py | 12 +-
src/bin/xfrin/xfrin.py.in | 11 +
src/bin/xfrin/xfrin.spec | 5 +
src/bin/xfrout/tests/Makefile.am | 5 +-
src/bin/xfrout/tests/xfrout_test.py.in | 197 +++-
src/bin/xfrout/xfrout.py.in | 182 +++-
src/bin/xfrout/xfrout.spec.pre.in | 41 +-
src/bin/xfrout/xfrout_messages.mes | 11 +
src/bin/zonemgr/tests/Makefile.am | 2 +-
src/cppcheck-suppress.lst | 2 +-
src/lib/Makefile.am | 6 +-
src/lib/datasrc/Makefile.am | 1 +
src/lib/datasrc/database.cc | 469 ++++++---
src/lib/datasrc/database.h | 126 ++-
src/lib/datasrc/datasrc_messages.mes | 5 +
src/lib/datasrc/memory_datasrc.cc | 6 +
src/lib/datasrc/memory_datasrc.h | 6 +
src/lib/datasrc/sqlite3_accessor.cc | 105 ++-
src/lib/datasrc/sqlite3_accessor.h | 4 +
src/lib/datasrc/tests/database_unittest.cc | 289 +++++-
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 8 +
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 39 +
src/lib/datasrc/zone.h | 73 ++-
src/lib/dns/gen-rdatacode.py.in | 17 +-
src/lib/dns/message.cc | 43 +-
src/lib/dns/message.h | 55 +-
src/lib/dns/python/Makefile.am | 1 +
src/lib/dns/python/message_python.cc | 78 +-
src/lib/dns/python/message_python_inc.cc | 41 +
src/lib/dns/python/pydnspp.cc | 139 ++-
src/lib/dns/python/tests/message_python_test.py | 52 +-
src/lib/dns/rdata/any_255/tsig_250.cc | 1 +
src/lib/dns/rdata/generic/afsdb_18.cc | 1 +
src/lib/dns/rdata/generic/minfo_14.cc | 1 +
src/lib/dns/rdata/generic/rp_17.cc | 1 +
src/lib/dns/rdata/template.cc | 1 +
src/lib/dns/tests/message_unittest.cc | 131 +++-
src/lib/dns/tests/testdata/Makefile.am | 6 +-
src/lib/dns/tests/testdata/message_fromWire19.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire20.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire21.spec | 20 +
src/lib/dns/tests/testdata/message_fromWire22.spec | 14 +
src/lib/python/isc/Makefile.am | 2 +-
src/lib/python/isc/__init__.py | 7 +-
src/lib/python/isc/acl/tests/Makefile.am | 2 +-
src/lib/python/isc/bind10/tests/Makefile.am | 2 +-
src/lib/python/isc/cc/tests/Makefile.am | 2 +-
src/lib/python/isc/config/tests/Makefile.am | 2 +-
src/lib/python/isc/datasrc/Makefile.am | 36 +-
src/lib/python/isc/datasrc/__init__.py | 21 +-
src/lib/python/isc/datasrc/client_inc.cc | 157 +++
src/lib/python/isc/datasrc/client_python.cc | 264 +++++
.../isc/datasrc/client_python.h} | 19 +-
src/lib/python/isc/datasrc/datasrc.cc | 225 ++++
.../python/isc/{acl/dns.h => datasrc/datasrc.h} | 18 +-
src/lib/python/isc/datasrc/finder_inc.cc | 96 ++
src/lib/python/isc/datasrc/finder_python.cc | 248 ++++
.../isc/datasrc/finder_python.h} | 18 +-
src/lib/python/isc/datasrc/iterator_inc.cc | 34 +
src/lib/python/isc/datasrc/iterator_python.cc | 202 ++++
.../isc/datasrc/iterator_python.h} | 20 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 10 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 389 +++++++
src/lib/python/isc/datasrc/updater_inc.cc | 181 +++
src/lib/python/isc/datasrc/updater_python.cc | 318 ++++++
.../isc/datasrc/updater_python.h} | 21 +-
.../isc/log => lib/python/isc/dns}/Makefile.am | 4 +-
src/lib/python/isc/log/log.cc | 2 +-
src/lib/python/isc/log/tests/Makefile.am | 2 +-
src/lib/python/isc/net/tests/Makefile.am | 2 +-
src/lib/python/isc/notify/tests/Makefile.am | 2 +-
src/lib/python/isc/util/tests/Makefile.am | 2 +-
tests/system/bindctl/tests.sh | 16 +-
136 files changed, 6294 insertions(+), 2877 deletions(-)
delete mode 100644 src/bin/stats/stats-schema.spec
mode change 100755 => 100644 src/bin/stats/stats_httpd.py.in
delete mode 100644 src/bin/stats/tests/fake_select.py
delete mode 100644 src/bin/stats/tests/fake_socket.py
delete mode 100644 src/bin/stats/tests/fake_time.py
delete mode 100644 src/bin/stats/tests/http/Makefile.am
delete mode 100644 src/bin/stats/tests/http/__init__.py
delete mode 100644 src/bin/stats/tests/http/server.py
delete mode 100644 src/bin/stats/tests/isc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
delete mode 100644 src/bin/stats/tests/isc/cc/session.py
delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
delete mode 100644 src/bin/stats/tests/isc/log/__init__.py
delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
delete mode 100644 src/bin/stats/tests/isc/util/__init__.py
delete mode 100644 src/bin/stats/tests/isc/util/process.py
create mode 100644 src/bin/stats/tests/test_utils.py
delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
create mode 100644 src/lib/dns/python/message_python_inc.cc
create mode 100644 src/lib/dns/tests/testdata/message_fromWire19.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire20.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire21.spec
create mode 100644 src/lib/dns/tests/testdata/message_fromWire22.spec
create mode 100644 src/lib/python/isc/datasrc/client_inc.cc
create mode 100644 src/lib/python/isc/datasrc/client_python.cc
copy src/lib/{dns/python/message_python.h => python/isc/datasrc/client_python.h} (73%)
create mode 100644 src/lib/python/isc/datasrc/datasrc.cc
copy src/lib/python/isc/{acl/dns.h => datasrc/datasrc.h} (82%)
create mode 100644 src/lib/python/isc/datasrc/finder_inc.cc
create mode 100644 src/lib/python/isc/datasrc/finder_python.cc
copy src/lib/{dns/python/message_python.h => python/isc/datasrc/finder_python.h} (73%)
create mode 100644 src/lib/python/isc/datasrc/iterator_inc.cc
create mode 100644 src/lib/python/isc/datasrc/iterator_python.cc
copy src/lib/{dns/python/message_python.h => python/isc/datasrc/iterator_python.h} (73%)
create mode 100644 src/lib/python/isc/datasrc/tests/datasrc_test.py
create mode 100644 src/lib/python/isc/datasrc/updater_inc.cc
create mode 100644 src/lib/python/isc/datasrc/updater_python.cc
copy src/lib/{dns/python/message_python.h => python/isc/datasrc/updater_python.h} (73%)
rename src/{bin/stats/tests/isc/log => lib/python/isc/dns}/Makefile.am (54%)
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index d0565e1..82e920b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,43 @@
+291. [func] naokikambe
+ Statistics items are now specified in each module's spec file,
+ and the stats module reads them through the config manager. The
+ stats module and stats httpd report statistics data and the
+ statistics schema for each module via both bindctl and HTTP/XML.
+ (Trac #928,#929,#930,#1175, git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
+290. [func] jinmei
+ libdns++/pydnspp: added an option parameter to the "from wire"
+ methods of the Message class. One option is defined,
+ PRESERVE_ORDER, which directs the parser to handle each RR
+ separately, preserving their order, and to construct RRsets in
+ the message sections so that each RRset contains only one RR.
+ (Trac #1258, git c874cb056e2a5e656165f3c160e1b34ccfe8b302)
+
+289. [func]* jinmei
+ b10-xfrout: ACLs for xfrout can now be configured on a per-zone basis.
+ A per-zone ACL is part of a more general zone configuration. A
+ quick example for configuring an ACL for zone "example.com" that
+ rejects any transfer request for that zone is as follows:
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config add Xfrout/zone_config[0]/transfer_acl
+ > config set Xfrout/zone_config[0]/transfer_acl[0] {"action": "REJECT"}
+ The previous global ACL (query_acl) was renamed to transfer_acl,
+ which now works as the default ACL. Note: backward compatibility
+ is not provided, so an existing configuration using query_acl
+ needs to be updated by hand.
+ Note: the per-zone configuration framework is a temporary
+ workaround. It will eventually be redesigned as a system-wide
+ configuration.
+ (Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
+
+288. [bug] stephen
+ Fixed a problem whereby the order in which component files appeared in
+ rdataclass.cc was system dependent, leading to problems on some
+ systems where data types were used before the header file in which
+ they were declared was included.
+ (Trac #1202, git 4a605525cda67bea8c43ca8b3eae6e6749797450)
+
287. [bug]* jinmei
Python script files for log messages (xxx_messages.py) should have
been installed under the "isc" package. This fix itself should
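
As a quick illustration of ChangeLog entry 290 above, here is a minimal pydnspp sketch of how the new "from wire" option might be used. This is an illustration only: it assumes the Python binding exposes the option constant as Message.PRESERVE_ORDER, mirroring the C++ name, and the input file is just a placeholder for real wire data.

    from pydnspp import Message

    # Placeholder input; a real message would normally arrive from a socket.
    wire_data = open("response.wire", "rb").read()

    msg = Message(Message.PARSE)
    # With the (assumed) PRESERVE_ORDER option the parser handles each RR
    # separately, keeps the original order, and builds single-RR RRsets.
    msg.from_wire(wire_data, Message.PRESERVE_ORDER)

    for rrset in msg.get_section(Message.SECTION_ANSWER):
        print(rrset.to_text())
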
diff --git a/configure.ac b/configure.ac
index ee5d79b..1cb0bb0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -437,7 +437,7 @@ AC_ARG_WITH([botan],
AC_HELP_STRING([--with-botan=PATH],
[specify exact directory of Botan library]),
[botan_path="$withval"])
-if test "${botan_path}" == "no" ; then
+if test "${botan_path}" = "no" ; then
AC_MSG_ERROR([Need botan for libcryptolink])
fi
if test "${botan_path}" != "yes" ; then
@@ -510,7 +510,7 @@ AC_ARG_WITH([log4cplus],
AC_HELP_STRING([--with-log4cplus=PATH],
[specify exact directory of log4cplus library and headers]),
[log4cplus_path="$withval"])
-if test "${log4cplus_path}" == "no" ; then
+if test "${log4cplus_path}" = "no" ; then
AC_MSG_ERROR([Need log4cplus])
elif test "${log4cplus_path}" != "yes" ; then
LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
@@ -817,14 +817,6 @@ AC_CONFIG_FILES([Makefile
src/bin/zonemgr/tests/Makefile
src/bin/stats/Makefile
src/bin/stats/tests/Makefile
- src/bin/stats/tests/isc/Makefile
- src/bin/stats/tests/isc/cc/Makefile
- src/bin/stats/tests/isc/config/Makefile
- src/bin/stats/tests/isc/util/Makefile
- src/bin/stats/tests/isc/log/Makefile
- src/bin/stats/tests/isc/log_messages/Makefile
- src/bin/stats/tests/testdata/Makefile
- src/bin/stats/tests/http/Makefile
src/bin/usermgr/Makefile
src/bin/tests/Makefile
src/lib/Makefile
@@ -845,6 +837,7 @@ AC_CONFIG_FILES([Makefile
src/lib/python/isc/util/tests/Makefile
src/lib/python/isc/datasrc/Makefile
src/lib/python/isc/datasrc/tests/Makefile
+ src/lib/python/isc/dns/Makefile
src/lib/python/isc/cc/Makefile
src/lib/python/isc/cc/tests/Makefile
src/lib/python/isc/config/Makefile
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index a9a4cc6..1070a2e 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -717,24 +717,30 @@ This may be a temporary setting until then.
</p><p>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show the schema
+ of specified or all statistics data, and set specified
+ statistics data.
For example, using <span class="command"><strong>bindctl</strong></span>:
</p><pre class="screen">
> <strong class="userinput"><code>Stats show</code></strong>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</pre><p>
</p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229447788">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229447799">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229448040">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229448215">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229448428">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229447788"></a>Logging configuration</h2></div></div></div><p>
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index d34746b..00ffee6 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -1522,24 +1522,30 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
<para>
- This stats daemon provides commands to identify if it is running,
- show specified or all statistics data, set values, remove data,
- and reset data.
+ This stats daemon provides commands to identify if it is
+ running, show specified or all statistics data, show the schema
+ of specified or all statistics data, and set specified
+ statistics data.
For example, using <command>bindctl</command>:
<screen>
> <userinput>Stats show</userinput>
{
- "auth.queries.tcp": 1749,
- "auth.queries.udp": 867868,
- "bind10.boot_time": "2011-01-20T16:59:03Z",
- "report_time": "2011-01-20T17:04:06Z",
- "stats.boot_time": "2011-01-20T16:59:05Z",
- "stats.last_update_time": "2011-01-20T17:04:05Z",
- "stats.lname": "4d3869d9_a at jreed.example.net",
- "stats.start_time": "2011-01-20T16:59:05Z",
- "stats.timestamp": 1295543046.823504
+ "Auth": {
+ "queries.tcp": 1749,
+ "queries.udp": 867868
+ },
+ "Boss": {
+ "boot_time": "2011-01-20T16:59:03Z"
+ },
+ "Stats": {
+ "boot_time": "2011-01-20T16:59:05Z",
+ "last_update_time": "2011-01-20T17:04:05Z",
+ "lname": "4d3869d9_a at jreed.example.net",
+ "report_time": "2011-01-20T17:04:06Z",
+ "timestamp": 1295543046.823504
+ }
}
</screen>
</para>
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index 9f04b76..1ffa687 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -257,4 +257,7 @@ request. The zone manager component has been informed of the request,
but has returned an error response (which is included in the message). The
NOTIFY request will not be honored.
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 5a31442..c9dac88 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -125,6 +125,10 @@ public:
/// The TSIG keyring
const shared_ptr<TSIGKeyRing>* keyring_;
+
+ /// Bind the ModuleSpec object in config_session_ with
+ /// isc::config::ModuleSpec::validateStatistics.
+ void registerStatisticsValidator();
private:
std::string db_file_;
@@ -139,6 +143,9 @@ private:
/// Increment query counter
void incCounter(const int protocol);
+
+ // validateStatistics
+ bool validateStatistics(isc::data::ConstElementPtr data) const;
};
AuthSrvImpl::AuthSrvImpl(const bool use_cache,
@@ -317,6 +324,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
void
AuthSrv::setConfigSession(ModuleCCSession* config_session) {
impl_->config_session_ = config_session;
+ impl_->registerStatisticsValidator();
}
void
@@ -670,6 +678,22 @@ AuthSrvImpl::incCounter(const int protocol) {
}
}
+void
+AuthSrvImpl::registerStatisticsValidator() {
+ counters_.registerStatisticsValidator(
+ boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+ if (config_session_ == NULL) {
+ return (false);
+ }
+ return (
+ config_session_->getModuleSpec().validateStatistics(
+ data, true));
+}
+
ConstElementPtr
AuthSrvImpl::setDbFile(ConstElementPtr config) {
ConstElementPtr answer = isc::config::createAnswer();
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 898fff7..ab6404e 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -253,6 +253,13 @@ Query::process() const {
// Just empty answer with SOA in authority section
putSOA(*result.zone_finder);
break;
+ default:
+ // These are new result codes (WILDCARD and WILDCARD_NXRRSET)
+ // They should not come from the in-memory backend, and the
+ // database backend isn't used yet.
+ // TODO: Implement before letting the database backends in
+ isc_throw(isc::NotImplemented, "Unknown result code");
+ break;
}
}
}
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 76e5007..e62719f 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -37,11 +37,14 @@ public:
void inc(const AuthCounters::CounterType type);
bool submitStatistics() const;
void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+ void registerStatisticsValidator
+ (AuthCounters::validator_type validator);
// Currently for testing purpose only
uint64_t getCounter(const AuthCounters::CounterType type) const;
private:
std::vector<uint64_t> counters_;
isc::cc::AbstractSession* statistics_session_;
+ AuthCounters::validator_type validator_;
};
AuthCountersImpl::AuthCountersImpl() :
@@ -67,16 +70,25 @@ AuthCountersImpl::submitStatistics() const {
}
std::stringstream statistics_string;
statistics_string << "{\"command\": [\"set\","
- << "{ \"stats_data\": "
- << "{ \"auth.queries.udp\": "
+ << "{ \"owner\": \"Auth\","
+ << " \"data\":"
+ << "{ \"queries.udp\": "
<< counters_.at(AuthCounters::COUNTER_UDP_QUERY)
- << ", \"auth.queries.tcp\": "
+ << ", \"queries.tcp\": "
<< counters_.at(AuthCounters::COUNTER_TCP_QUERY)
<< " }"
<< "}"
<< "]}";
isc::data::ConstElementPtr statistics_element =
isc::data::Element::fromJSON(statistics_string);
+ // validate the statistics data before sending
+ if (validator_) {
+ if (!validator_(
+ statistics_element->get("command")->get(1)->get("data"))) {
+ LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+ return (false);
+ }
+ }
try {
// group_{send,recv}msg() can throw an exception when encountering
// an error, and group_recvmsg() will throw an exception on timeout.
@@ -105,6 +117,13 @@ AuthCountersImpl::setStatisticsSession
statistics_session_ = statistics_session;
}
+void
+AuthCountersImpl::registerStatisticsValidator
+ (AuthCounters::validator_type validator)
+{
+ validator_ = validator;
+}
+
// Currently for testing purpose only
uint64_t
AuthCountersImpl::getCounter(const AuthCounters::CounterType type) const {
@@ -139,3 +158,10 @@ uint64_t
AuthCounters::getCounter(const AuthCounters::CounterType type) const {
return (impl_->getCounter(type));
}
+
+void
+AuthCounters::registerStatisticsValidator
+ (AuthCounters::validator_type validator) const
+{
+ return (impl_->registerStatisticsValidator(validator));
+}
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 5bf6436..c930414 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -131,6 +131,26 @@ public:
/// \return the value of the counter specified by \a type.
///
uint64_t getCounter(const AuthCounters::CounterType type) const;
+
+ /// \brief A type of validation function for the specification in
+ /// isc::config::ModuleSpec.
+ ///
+ /// This type might be useful not only for the statistics
+ /// specification but also for the config_data specification and
+ /// for commands.
+ ///
+ typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+ validator_type;
+
+ /// \brief Register a statistics validation function for
+ /// AuthCounters.
+ ///
+ /// This method never throws an exception.
+ ///
+ /// \param validator A validator_type function that validates
+ /// statistics data against the specification.
+ ///
+ void registerStatisticsValidator(AuthCounters::validator_type validator) const;
};
#endif // __STATISTICS_H
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 4b8f013..b2d1094 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -141,6 +141,10 @@ public:
// Turn this on if you want it to return RRSIGs regardless of FIND_GLUE_OK
void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+ Name findPreviousName(const Name&) const {
+ isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+ }
+
private:
typedef map<RRType, ConstRRsetPtr> RRsetStore;
typedef map<Name, RRsetStore> Domains;
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index 9a3dded..98e573b 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
#include <cc/data.h>
#include <cc/session.h>
@@ -76,6 +78,13 @@ protected:
}
MockSession statistics_session_;
AuthCounters counters;
+ // No need to inherit from the original class here.
+ class MockModuleSpec {
+ public:
+ bool validateStatistics(ConstElementPtr, const bool valid) const
+ { return (valid); }
+ };
+ MockModuleSpec module_spec_;
};
void
@@ -181,7 +190,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
statistics_session_.setThrowSessionTimeout(false);
}
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
// Submit statistics data.
// Validate if it submits correct data.
@@ -201,12 +210,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
// Command is "set".
EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
ConstElementPtr statistics_data = statistics_session_.sent_msg
->get("command")->get(1)
- ->get("stats_data");
+ ->get("data");
// UDP query counter is 2 and TCP query counter is 1.
- EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
- EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
}
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+ // a validator for the unit test
+ AuthCounters::validator_type validator;
+ ConstElementPtr el;
+
+ // Submit statistics data with correct statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, true);
+
+ EXPECT_TRUE(validator(el));
+
+ // register validator to AuthCounters
+ counters.registerStatisticsValidator(validator);
+
+ // Counters should be initialized to 0.
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_UDP_QUERY));
+ EXPECT_EQ(0, counters.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+
+ // UDP query counter is set to 2.
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ counters.inc(AuthCounters::COUNTER_UDP_QUERY);
+ // TCP query counter is set to 1.
+ counters.inc(AuthCounters::COUNTER_TCP_QUERY);
+
+ // checks the value returned by submitStatistics
+ EXPECT_TRUE(counters.submitStatistics());
+
+ // Destination is "Stats".
+ EXPECT_EQ("Stats", statistics_session_.msg_destination);
+ // Command is "set".
+ EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+ ->get(0)->stringValue());
+ EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+ ->get(1)->get("owner")->stringValue());
+ ConstElementPtr statistics_data = statistics_session_.sent_msg
+ ->get("command")->get(1)
+ ->get("data");
+ // UDP query counter is 2 and TCP query counter is 1.
+ EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+ EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+ // Submit statistics data with incorrect statistics validator.
+ validator = boost::bind(
+ &AuthCountersTest::MockModuleSpec::validateStatistics,
+ &module_spec_, _1, false);
+
+ EXPECT_FALSE(validator(el));
+
+ counters.registerStatisticsValidator(validator);
+
+ // checks the value returned by submitStatistics
+ EXPECT_FALSE(counters.submitStatistics());
+}
}
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 4bac069..4debcdb 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -198,3 +198,7 @@ the message channel.
% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
An unknown child process has exited. The PID is printed, but no further
action will be taken by the boss process.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
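
To make the statistics validation described above more concrete, here is a minimal sketch of how a module spec's "statistics" section is checked with validate_statistics(), using the Boss spec fragment that also appears in bind10_test.py.in below. It assumes the BIND 10 Python packages (isc.config) are on PYTHONPATH; the data values are illustrative only.

    import time
    from isc.config.module_spec import ModuleSpec

    # The "statistics" definition the Boss spec file provides for boot_time.
    spec = ModuleSpec({
        "module_name": "Boss",
        "statistics": [
            {
                "item_name": "boot_time",
                "item_type": "string",
                "item_optional": False,
                "item_default": "1970-01-01T00:00:00Z",
                "item_title": "Boot time",
                "item_description": "A date time when bind10 process starts initially",
                "item_format": "date-time"
            }
        ]
    })

    stats_data = {"boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())}

    # validate_statistics(True, data) checks the full data set against the
    # spec; only data that passes is sent on to the Stats module.
    if spec.validate_statistics(True, stats_data):
        print("statistics data is valid")
    else:
        print("specified statistics data is invalid")
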
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 28af8cc..1687cb1 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -85,7 +85,7 @@ isc.util.process.rename(sys.argv[0])
# number, and the overall BIND 10 version number (set in configure.ac).
VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-# This is for bind10.boottime of stats module
+# This is for boot_time of Boss
_BASETIME = time.gmtime()
class RestartSchedule:
@@ -308,9 +308,11 @@ class BoB:
return process_list
def _get_stats_data(self):
- return { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }}
+ return { "owner": "Boss",
+ "data": { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+ }
def command_handler(self, command, args):
logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
@@ -325,15 +327,22 @@ class BoB:
answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
elif command == "sendstats":
# send statistics data to the stats daemon immediately
- cmd = isc.config.ccsession.create_command(
- 'set', self._get_stats_data())
- seq = self.cc_session.group_sendmsg(cmd, 'Stats')
- # Consume the answer, in case it becomes a orphan message.
- try:
- self.cc_session.group_recvmsg(False, seq)
- except isc.cc.session.SessionTimeout:
- pass
- answer = isc.config.ccsession.create_answer(0)
+ stats_data = self._get_stats_data()
+ valid = self.ccs.get_module_spec().validate_statistics(
+ True, stats_data["data"])
+ if valid:
+ cmd = isc.config.ccsession.create_command('set', stats_data)
+ seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+ # Consume the answer, in case it becomes an orphan message.
+ try:
+ self.cc_session.group_recvmsg(False, seq)
+ except isc.cc.session.SessionTimeout:
+ pass
+ answer = isc.config.ccsession.create_answer(0)
+ else:
+ logger.fatal(BIND10_INVALID_STATISTICS_DATA)
+ answer = isc.config.ccsession.create_answer(
+ 1, "specified statistics data is invalid")
elif command == "ping":
answer = isc.config.ccsession.create_answer(0, "pong")
elif command == "show_processes":
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 9ed0cea..50e6e29 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -23,14 +23,14 @@ BIND10_PATH=@abs_top_builddir@/src/bin/bind10
PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index d0f36ca..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -8,7 +8,7 @@ noinst_SCRIPTS = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 424a610..2efd940 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -137,9 +137,27 @@ class TestBoB(unittest.TestCase):
def group_sendmsg(self, msg, group):
(self.msg, self.group) = (msg, group)
def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Boss",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
bob = BoB()
bob.verbose = True
bob.cc_session = DummySession()
+ bob.ccs = DummyModuleCCSession()
# a bad command
self.assertEqual(bob.command_handler(-1, None),
isc.config.ccsession.create_answer(1, "bad command"))
@@ -150,8 +168,9 @@ class TestBoB(unittest.TestCase):
# "getstats" command
self.assertEqual(bob.command_handler("getstats", None),
isc.config.ccsession.create_answer(0,
- { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
}}))
# "sendstats" command
self.assertEqual(bob.command_handler("sendstats", None),
@@ -159,8 +178,9 @@ class TestBoB(unittest.TestCase):
self.assertEqual(bob.cc_session.group, "Stats")
self.assertEqual(bob.cc_session.msg,
isc.config.ccsession.create_command(
- 'set', { "stats_data": {
- 'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ "set", { "owner": "Boss",
+ "data": {
+ "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
}}))
# "ping" command
self.assertEqual(bob.command_handler("ping", None),
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index f7d7e78..f4cc40c 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -27,7 +27,7 @@ export PYTHONPATH
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index 5bde145..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index ca8005b..ffea2d7 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index 41edc8e..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST = testdata/plugins/testplugin.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
export PYTHON_EXEC
CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
export PYTHONPATH
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6bb9fba..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 79f5968..231a3d9 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
index ed0068b..050f6a3 100644
--- a/src/bin/host/b10-host.1
+++ b/src/bin/host/b10-host.1
@@ -103,10 +103,6 @@ It doesn\'t use
at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
.PP
-\fBb10\-host\fR
-does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
-.PP
-
\fB\-p\fR
is not a standard feature\&.
.SH "HISTORY"
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
index 7da07dd..a17ef67 100644
--- a/src/bin/host/b10-host.xml
+++ b/src/bin/host/b10-host.xml
@@ -176,11 +176,6 @@
</para>
<para>
- <command>b10-host</command> does not do reverse lookups by
- default yet (by detecting if name is a IPv4 or IPv6 address).
- </para>
-
- <para>
<option>-p</option> is not a standard feature.
</para>
</refsect1>
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
index e6db99c..43b7920 100755
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -25,7 +25,7 @@ export PYTHONPATH
# required by loadable python modules.
SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
export @ENV_LIBRARY_PATH@
fi
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index 73c8a34..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -19,7 +19,7 @@ noinst_SCRIPTS = correct_test.sh
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: maybe use TESTS?
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index 57f7857..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -18,7 +18,7 @@ noinst_SCRIPTS = error_test.sh
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# TODO: use TESTS ?
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index ee9ffd8..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index 3289765..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,7 +5,7 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
@@ -21,7 +21,7 @@ CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
man_MANS = b10-stats.8 b10-stats-httpd.8
EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
.PP
\fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended as an HTTP/XML interface for the statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to serve requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
\fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
\fBb10\-stats\fR
for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
bindctl(1)
about how to configure the settings\&.
.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
/usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
\(em the template file of XML document\&.
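
For reference, a minimal sketch of fetching the statistics document described above, assuming the default listen address 127.0.0.1:8000 mentioned in the text:

    import urllib.request
    # Requesting the root URL; the server redirects to the XML document.
    resp = urllib.request.urlopen("http://127.0.0.1:8000/")
    print(resp.geturl())   # expected to end with /bind10/statistics/xml
    print(resp.read().decode("utf-8"))
    resp.close()
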
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
by the BIND 10 boss process (<command>bind10</command>) and eventually
exited by it. The server is intended to be server requests by HTTP
clients like web browsers and third-party modules. When the server is
- asked, it requests BIND 10 statistics data from
+ asked, it requests BIND 10 statistics data or its schema from
<command>b10-stats</command>, and it sends the data back in Python
dictionary format and the server converts it into XML format. The server
sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
how to configure the settings.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
<para>
<filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
<!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
<refsect1>
<title>CONFIGURATION AND COMMANDS</title>
<para>
- The configurable setting in
+ The configurable setting in
<filename>stats-httpd.spec</filename> is:
</para>
<variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index 98b109b..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -135,10 +135,6 @@ See other manual pages for explanations for their statistics that are kept track
\fBb10\-stats\fR\&. It contains commands for
\fBb10\-stats\fR\&. They can be invoked via
bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
.SH "SEE ALSO"
.PP
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index 9709175..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -213,12 +213,6 @@
invoked
via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
</para>
- <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
- <!--TODO: The filename should be computed from prefix-->
- — This is a spec file for data schema of
- of BIND 10 statistics. This schema cannot be configured
- via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
- </para>
</refsect1>
<refsect1>
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index 01ffdc6..a1f6406 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -44,6 +44,7 @@ td.title {
<h1>BIND 10 Statistics</h1>
<table>
<tr>
+ <th>Owner</th>
<th>Title</th>
<th>Value</th>
</tr>
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
deleted file mode 100644
index 5252865..0000000
--- a/src/bin/stats/stats-schema.spec
+++ /dev/null
@@ -1,86 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Statistics data schema",
- "config_data": [
- {
- "item_name": "report_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Report time",
- "item_description": "A date time when stats module reports",
- "item_format": "date-time"
- },
- {
- "item_name": "bind10.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "bind10.BootTime",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.BootTime",
- "item_description": "A date time when the stats module starts initially or when the stats module restarts",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.start_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.StartTime",
- "item_description": "A date time when the stats module starts collecting data or resetting values last time",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.last_update_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "stats.LastUpdateTime",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
- "item_format": "date-time"
- },
- {
- "item_name": "stats.timestamp",
- "item_type": "real",
- "item_optional": false,
- "item_default": 0.0,
- "item_title": "stats.Timestamp",
- "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
- },
- {
- "item_name": "stats.lname",
- "item_type": "string",
- "item_optional": false,
- "item_default": "",
- "item_title": "stats.LocalName",
- "item_description": "A localname of stats module given via CC protocol"
- },
- {
- "item_name": "auth.queries.tcp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.tcp",
- "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
- },
- {
- "item_name": "auth.queries.udp",
- "item_type": "integer",
- "item_optional": false,
- "item_default": 0,
- "item_title": "auth.queries.udp",
- "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
- }
- ],
- "commands": []
- }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index afed544..da00818 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,16 +15,17 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+Statistics daemon in BIND 10
+
+"""
import sys; sys.path.append ('@@PYTHONPATH@@')
import os
-import signal
-import select
from time import time, strftime, gmtime
from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
+import isc
+import isc.util.process
import isc.log
from isc.log_messages.stats_messages import *
@@ -35,226 +36,157 @@ logger = isc.log.Logger("stats")
# have #1074
DBG_STATS_MESSAGING = 30
+# This is for boot_time of Stats
+_BASETIME = gmtime()
+
# for setproctitle
-import isc.util.process
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
# installed on the system
if "B10_FROM_SOURCE" in os.environ:
- BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats"
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+ "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
- BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
- BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+ SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+ SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
-class Singleton(type):
+def get_timestamp():
"""
- A abstract class of singleton pattern
+ get current timestamp
"""
- # Because of singleton pattern:
- # At the beginning of coding, one UNIX domain socket is needed
- # for config manager, another socket is needed for stats module,
- # then stats module might need two sockets. So I adopted the
- # singleton pattern because I avoid creating multiple sockets in
- # one stats module. But in the initial version stats module
- # reports only via bindctl, so just one socket is needed. To use
- # the singleton pattern is not important now. :(
+ return time()
- def __init__(self, *args, **kwargs):
- type.__init__(self, *args, **kwargs)
- self._instances = {}
+def get_datetime(gmt=None):
+ """
+ get current datetime
+ """
+ if not gmt: gmt = gmtime()
+ return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
- def __call__(self, *args, **kwargs):
- if args not in self._instances:
- self._instances[args]={}
- kw = tuple(kwargs.items())
- if kw not in self._instances[args]:
- self._instances[args][kw] = type.__call__(self, *args, **kwargs)
- return self._instances[args][kw]
+def get_spec_defaults(spec):
+ """
+ extracts the default values of the items from the spec specified as
+ the argument, and returns a dict which maps each item name to its
+ default value
+ """
+ if type(spec) is not list: return {}
+ def _get_spec_defaults(spec):
+ item_type = spec['item_type']
+ if item_type == "integer":
+ return int(spec.get('item_default', 0))
+ elif item_type == "real":
+ return float(spec.get('item_default', 0.0))
+ elif item_type == "boolean":
+ return bool(spec.get('item_default', False))
+ elif item_type == "string":
+ return str(spec.get('item_default', ""))
+ elif item_type == "list":
+ return spec.get(
+ "item_default",
+ [ _get_spec_defaults(spec["list_item_spec"]) ])
+ elif item_type == "map":
+ return spec.get(
+ "item_default",
+ dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+ else:
+ return spec.get("item_default", None)
+ return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
class Callback():
"""
A Callback handler class
"""
- def __init__(self, name=None, callback=None, args=(), kwargs={}):
- self.name = name
- self.callback = callback
+ def __init__(self, command=None, args=(), kwargs={}):
+ self.command = command
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
- if not args:
- args = self.args
- if not kwargs:
- kwargs = self.kwargs
- if self.callback:
- return self.callback(*args, **kwargs)
+ if not args: args = self.args
+ if not kwargs: kwargs = self.kwargs
+ if self.command: return self.command(*args, **kwargs)
-class Subject():
- """
- A abstract subject class of observer pattern
- """
- # Because of observer pattern:
- # In the initial release, I'm also sure that observer pattern
- # isn't definitely needed because the interface between gathering
- # and reporting statistics data is single. However in the future
- # release, the interfaces may be multiple, that is, multiple
- # listeners may be needed. For example, one interface, which
- # stats module has, is for between ''config manager'' and stats
- # module, another interface is for between ''HTTP server'' and
- # stats module, and one more interface is for between ''SNMP
- # server'' and stats module. So by considering that stats module
- # needs multiple interfaces in the future release, I adopted the
- # observer pattern in stats module. But I don't have concrete
- # ideas in case of multiple listener currently.
-
- def __init__(self):
- self._listeners = []
-
- def attach(self, listener):
- if not listener in self._listeners:
- self._listeners.append(listener)
-
- def detach(self, listener):
- try:
- self._listeners.remove(listener)
- except ValueError:
- pass
+class StatsError(Exception):
+ """Exception class for Stats class"""
+ pass
- def notify(self, event, modifier=None):
- for listener in self._listeners:
- if modifier != listener:
- listener.update(event)
-
-class Listener():
+class Stats:
"""
- A abstract listener class of observer pattern
+ Main class of stats module
"""
- def __init__(self, subject):
- self.subject = subject
- self.subject.attach(self)
- self.events = {}
-
- def update(self, name):
- if name in self.events:
- callback = self.events[name]
- return callback()
-
- def add_event(self, event):
- self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
- """
- A concrete subject class which creates CC session object
- """
- def __init__(self, session=None):
- Subject.__init__(self)
- self.session=session
- self.running = False
-
- def start(self):
- self.running = True
- self.notify('start')
-
- def stop(self):
+ def __init__(self):
self.running = False
- self.notify('stop')
-
- def check(self):
- self.notify('check')
-
-class CCSessionListener(Listener):
- """
- A concrete listener class which creates SessionSubject object and
- ModuleCCSession object
- """
- def __init__(self, subject):
- Listener.__init__(self, subject)
- self.session = subject.session
- self.boot_time = get_datetime()
-
# create ModuleCCSession object
- self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- self.session)
-
- self.session = self.subject.session = self.cc_session._session
-
- # initialize internal data
- self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # add event handler invoked via SessionSubject object
- self.add_event(Callback('start', self.start))
- self.add_event(Callback('stop', self.stop))
- self.add_event(Callback('check', self.check))
- # don't add 'command_' suffix to the special commands in
- # order to prevent executing internal command via bindctl
-
+ self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler)
+ self.cc_session = self.mccs._session
+ # get module spec
+ self.module_name = self.mccs.get_module_spec().get_module_name()
+ self.modules = {}
+ self.statistics_data = {}
# get commands spec
- self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+ self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
# add event handler related command_handler of ModuleCCSession
- # invoked via bindctl
+ self.callbacks = {}
for cmd in self.commands_spec:
+ # add prefix "command_"
+ name = "command_" + cmd["command_name"]
try:
- # add prefix "command_"
- name = "command_" + cmd["command_name"]
callback = getattr(self, name)
- kwargs = self.initialize_data(cmd["command_args"])
- self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
- except AttributeError as ae:
- logger.error(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
-
- def _update_stats_data(self, args):
- # 'args' must be dictionary type
- if isinstance(args, dict) and isinstance(args.get('stats_data'), dict):
- self.stats_data.update(args['stats_data'])
-
- # overwrite "stats.LastUpdateTime"
- self.stats_data['stats.last_update_time'] = get_datetime()
+ kwargs = get_spec_defaults(cmd["command_args"])
+ self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+ except AttributeError:
+ raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+ self.mccs.start()
def start(self):
"""
- start the cc chanel
+ Start stats module
"""
- # set initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
- self.cc_session.start()
+ self.running = True
+ logger.info(STATS_STARTING)
+
# request Bob to send statistics data
logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
cmd = isc.config.ccsession.create_command("getstats", None)
- seq = self.session.group_sendmsg(cmd, 'Boss')
+ seq = self.cc_session.group_sendmsg(cmd, 'Boss')
try:
- answer, env = self.session.group_recvmsg(False, seq)
+ answer, env = self.cc_session.group_recvmsg(False, seq)
if answer:
- rcode, arg = isc.config.ccsession.parse_answer(answer)
+ rcode, args = isc.config.ccsession.parse_answer(answer)
if rcode == 0:
- self._update_stats_data(arg)
+ errors = self.update_statistics_data(
+ args["owner"], **args["data"])
+ if errors:
+ raise StatsError("boss spec file is incorrect: "
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name,
+ last_update_time=get_datetime())
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
except isc.cc.session.SessionTimeout:
pass
- def stop(self):
- """
- stop the cc chanel
- """
- return self.cc_session.close()
+ # initialize the Stats module's own statistics data
+ errors = self.update_statistics_data(
+ self.module_name,
+ lname=self.cc_session.lname,
+ boot_time=get_datetime(_BASETIME)
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
- def check(self):
- """
- check the cc chanel
- """
- return self.cc_session.check_command(False)
+ while self.running:
+ self.mccs.check_command(False)
def config_handler(self, new_config):
"""
@@ -262,169 +194,222 @@ class CCSessionListener(Listener):
"""
logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
new_config)
-
# do nothing currently
- return create_answer(0)
+ return isc.config.create_answer(0)
- def command_handler(self, command, *args, **kwargs):
+ def command_handler(self, command, kwargs):
"""
handle commands from the cc channel
"""
- # add 'command_' suffix in order to executing command via bindctl
name = 'command_' + command
-
- if name in self.events:
- event = self.events[name]
- return event(*args, **kwargs)
+ if name in self.callbacks:
+ callback = self.callbacks[name]
+ if kwargs:
+ return callback(**kwargs)
+ else:
+ return callback()
else:
- return self.command_unknown(command, args)
+ logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+ return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
- def command_shutdown(self, args):
+ def update_modules(self):
"""
- handle shutdown command
+ updates information of each module. This method gets each
+ module's information from the config manager and sets it into
+ self.modules. If fetching it from the config manager fails, it
+ raises StatsError.
"""
- logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
- self.subject.running = False
- return create_answer(0)
+ modules = {}
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command(
+ isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+ 'ConfigManager')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ for mod in value:
+ spec = { "module_name" : mod }
+ if value[mod] and type(value[mod]) is list:
+ spec["statistics"] = value[mod]
+ modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+ else:
+ raise StatsError("Updating module spec fails: " + str(value))
+ modules[self.module_name] = self.mccs.get_module_spec()
+ self.modules = modules
- def command_set(self, args, stats_data={}):
+ def get_statistics_data(self, owner=None, name=None):
"""
- handle set command
+ returns the statistics data which the stats module holds for each
+ module. If it can't find the specified statistics data, it raises
+ StatsError.
"""
- self._update_stats_data(args)
- return create_answer(0)
+ self.update_statistics_data()
+ if owner and name:
+ try:
+ return self.statistics_data[owner][name]
+ except KeyError:
+ pass
+ elif owner:
+ try:
+ return self.statistics_data[owner]
+ except KeyError:
+ pass
+ elif name:
+ pass
+ else:
+ return self.statistics_data
+ raise StatsError("No statistics data found: "
+ + "owner: " + str(owner) + ", "
+ + "name: " + str(name))
- def command_remove(self, args, stats_item_name=''):
+ def update_statistics_data(self, owner=None, **data):
"""
- handle remove command
+ changes the statistics data of the specified module to the specified
+ data. It updates information of each module first, and then it
+ updates the statistics data. If the specified data is invalid for the
+ statistics spec of the specified owner, it returns a list of error
+ messages. If there is no error, or if neither owner nor data is
+ specified in the arguments, it returns None.
"""
-
- # 'args' must be dictionary type
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
-
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_REMOVE_COMMAND,
- stats_item_name)
-
- # just remove one item
- self.stats_data.pop(stats_item_name)
-
- return create_answer(0)
-
- def command_show(self, args, stats_item_name=''):
+ self.update_modules()
+ statistics_data = {}
+ for (name, module) in self.modules.items():
+ value = get_spec_defaults(module.get_statistics_spec())
+ if module.validate_statistics(True, value):
+ statistics_data[name] = value
+ for (name, value) in self.statistics_data.items():
+ if name in statistics_data:
+ statistics_data[name].update(value)
+ else:
+ statistics_data[name] = value
+ self.statistics_data = statistics_data
+ if owner and data:
+ errors = []
+ try:
+ if self.modules[owner].validate_statistics(False, data, errors):
+ self.statistics_data[owner].update(data)
+ return
+ except KeyError:
+ errors.append("unknown module name: " + str(owner))
+ return errors
+
+ def command_status(self):
"""
- handle show command
+ handle status command
"""
+ logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+ return isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")")
- # always overwrite 'report_time' and 'stats.timestamp'
- # if "show" command invoked
- self.stats_data['report_time'] = get_datetime()
- self.stats_data['stats.timestamp'] = get_timestamp()
-
- # if with args
- if args and args['stats_item_name'] in self.stats_data:
- stats_item_name = args['stats_item_name']
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_NAME_COMMAND,
- stats_item_name)
- return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
-
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_SHOW_ALL_COMMAND)
- return create_answer(0, self.stats_data)
-
- def command_reset(self, args):
+ def command_shutdown(self):
"""
- handle reset command
+ handle shutdown command
"""
- logger.debug(DBG_STATS_MESSAGING,
- STATS_RECEIVED_RESET_COMMAND)
-
- # re-initialize internal variables
- self.stats_data = self.initialize_data(self.stats_spec)
-
- # reset initial value
- self.stats_data['stats.boot_time'] = self.boot_time
- self.stats_data['stats.start_time'] = get_datetime()
- self.stats_data['stats.last_update_time'] = get_datetime()
- self.stats_data['stats.lname'] = self.session.lname
-
- return create_answer(0)
+ logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+ self.running = False
+ return isc.config.create_answer(0)
- def command_status(self, args):
+ def command_show(self, owner=None, name=None):
"""
- handle status command
+ handle show command
"""
- logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
- # just return "I'm alive."
- return create_answer(0, "I'm alive.")
-
- def command_unknown(self, command, args):
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOW_ALL_COMMAND)
+ errors = self.update_statistics_data(
+ self.module_name,
+ timestamp=get_timestamp(),
+ report_time=get_datetime()
+ )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ try:
+ return isc.config.create_answer(
+ 0, self.get_statistics_data(owner, name))
+ except StatsError:
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
+
+ def command_showschema(self, owner=None, name=None):
"""
- handle an unknown command
+ handle showschema command
"""
- logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
- return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+ if owner or name:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+ str(owner)+", "+str(name))
+ else:
+ logger.debug(DBG_STATS_MESSAGING,
+ STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+ self.update_modules()
+ schema = {}
+ schema_byname = {}
+ for mod in self.modules:
+ spec = self.modules[mod].get_statistics_spec()
+ schema_byname[mod] = {}
+ if spec:
+ schema[mod] = spec
+ for item in spec:
+ schema_byname[mod][item['item_name']] = item
+ if owner:
+ try:
+ if name:
+ return isc.config.create_answer(0, schema_byname[owner][name])
+ else:
+ return isc.config.create_answer(0, schema[owner])
+ except KeyError:
+ pass
+ else:
+ if name:
+ return isc.config.create_answer(1, "module name is not specified")
+ else:
+ return isc.config.create_answer(0, schema)
+ return isc.config.create_answer(
+ 1, "specified arguments are incorrect: " \
+ + "owner: " + str(owner) + ", name: " + str(name))
- def initialize_data(self, spec):
+ def command_set(self, owner, data):
"""
- initialize stats data
+ handle set command
"""
- def __get_init_val(spec):
- if spec['item_type'] == 'null':
- return None
- elif spec['item_type'] == 'boolean':
- return bool(spec.get('item_default', False))
- elif spec['item_type'] == 'string':
- return str(spec.get('item_default', ''))
- elif spec['item_type'] in set(['number', 'integer']):
- return int(spec.get('item_default', 0))
- elif spec['item_type'] in set(['float', 'double', 'real']):
- return float(spec.get('item_default', 0.0))
- elif spec['item_type'] in set(['list', 'array']):
- return spec.get('item_default',
- [ __get_init_val(s) for s in spec['list_item_spec'] ])
- elif spec['item_type'] in set(['map', 'object']):
- return spec.get('item_default',
- dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
- else:
- return spec.get('item_default')
- return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+ errors = self.update_statistics_data(owner, **data)
+ if errors:
+ return isc.config.create_answer(
+ 1, "errors while setting statistics data: " \
+ + ", ".join(errors))
+ errors = self.update_statistics_data(
+ self.module_name, last_update_time=get_datetime() )
+ if errors:
+ raise StatsError("stats spec file is incorrect: "
+ + ", ".join(errors))
+ return isc.config.create_answer(0)
-def get_timestamp():
- """
- get current timestamp
- """
- return time()
-
-def get_datetime():
- """
- get current datetime
- """
- return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
try:
parser = OptionParser()
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
+ parser.add_option(
+ "-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
(options, args) = parser.parse_args()
if options.verbose:
isc.log.init("b10-stats", "DEBUG", 99)
- subject = SessionSubject(session=session)
- listener = CCSessionListener(subject)
- subject.start()
- while subject.running:
- subject.check()
- subject.stop()
-
+ stats = Stats()
+ stats.start()
except OptionValueError as ove:
logger.fatal(STATS_BAD_OPTION_VALUE, ove)
- except SessionError as se:
+ sys.exit(1)
+ except isc.cc.session.SessionError as se:
logger.fatal(STATS_CC_SESSION_ERROR, se)
+ sys.exit(1)
+ except StatsError as se:
+ logger.fatal(STATS_START_ERROR, se)
+ sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATS_STOPPED_BY_KEYBOARD)
-
-if __name__ == "__main__":
- main()
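
For reference, a sketch of what the new get_spec_defaults() helper above returns for a small hand-written spec (the item entries here are hypothetical):

    from stats import get_spec_defaults
    spec = [
        {"item_name": "queries.tcp", "item_type": "integer",
         "item_optional": False, "item_default": 0},
        {"item_name": "boot_time", "item_type": "string",
         "item_optional": False, "item_default": "1970-01-01T00:00:00Z"},
    ]
    print(get_spec_defaults(spec))
    # e.g. {'queries.tcp': 0, 'boot_time': '1970-01-01T00:00:00Z'}
    # (key order may vary)
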
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 635eb48..e716b62 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -6,55 +6,74 @@
"commands": [
{
"command_name": "status",
- "command_description": "identify whether stats module is alive or not",
+ "command_description": "Show status of the stats daemon",
+ "command_args": []
+ },
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down the stats module",
"command_args": []
},
{
"command_name": "show",
- "command_description": "show the specified/all statistics data",
+ "command_description": "Show the specified/all statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
"item_type": "string",
"item_optional": true,
- "item_default": ""
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "set",
- "command_description": "set the value of specified name in statistics data",
+ "command_name": "showschema",
+ "command_description": "Show the specified/all statistics schema",
"command_args": [
{
- "item_name": "stats_data",
- "item_type": "map",
- "item_optional": false,
- "item_default": {},
- "map_item_spec": []
+ "item_name": "owner",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "name",
+ "item_type": "string",
+ "item_optional": true,
+ "item_default": "",
+ "item_description": "statistics item name of the owner"
}
]
},
{
- "command_name": "remove",
- "command_description": "remove the specified name from statistics data",
+ "command_name": "set",
+ "command_description": "set the value of specified name in statistics data",
"command_args": [
{
- "item_name": "stats_item_name",
+ "item_name": "owner",
"item_type": "string",
"item_optional": false,
- "item_default": ""
+ "item_default": "",
+ "item_description": "module name of the owner of the statistics data"
+ },
+ {
+ "item_name": "data",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": {},
+ "item_description": "statistics data set of the owner",
+ "map_item_spec": []
}
]
- },
- {
- "command_name": "reset",
- "command_description": "reset all statistics data to default values except for several constant names",
- "command_args": []
- },
- {
- "command_name": "shutdown",
- "command_description": "Shut down the stats module",
- "command_args": []
}
],
"statistics": [
@@ -100,7 +119,7 @@
"item_default": "",
"item_title": "Local Name",
"item_description": "A localname of stats module given via CC protocol"
- }
+ }
]
}
}
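
A sketch of how the reworked commands above can be addressed over the command channel; the owner and item names are taken from the test data further below:

    import isc.config.ccsession
    # "show" with the new optional owner/name arguments; omit the second
    # argument to request all statistics data.
    cmd = isc.config.ccsession.create_command(
        "show", {"owner": "Auth", "name": "queries.udp"})
    schema_cmd = isc.config.ccsession.create_command("showschema")
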
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100755
new mode 100644
index 6be6adf..596870a
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -57,7 +57,6 @@ else:
BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -69,7 +68,6 @@ XSD_URL_PATH = '/bind10/statistics/xsd'
XSL_URL_PATH = '/bind10/statistics/xsl'
# TODO: This should be considered later.
XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
# Assign this process name
isc.util.process.rename()
@@ -160,8 +158,10 @@ class StatsHttpd:
self.mccs = None
self.httpd = []
self.open_mccs()
+ self.config = {}
self.load_config()
- self.load_templates()
+ self.http_addrs = []
+ self.mccs.start()
self.open_httpd()
def open_mccs(self):
@@ -171,10 +171,6 @@ class StatsHttpd:
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
- # read spec file of stats module and subscribe 'Stats'
- self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
- self.stats_config_spec = self.stats_module_spec.get_config_spec()
- self.stats_module_name = self.stats_module_spec.get_module_name()
def close_mccs(self):
"""Closes a ModuleCCSession object"""
@@ -189,18 +185,19 @@ class StatsHttpd:
"""Loads configuration from spec file or new configuration
from the config manager"""
# load config
- if len(new_config) > 0:
- self.config.update(new_config)
- else:
- self.config = DEFAULT_CONFIG
- self.config.update(
- dict([
- (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
- for itm in self.mccs.get_module_spec().get_config_spec()
- ])
- )
+ if len(self.config) == 0:
+ self.config = dict([
+ (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+ for itm in self.mccs.get_module_spec().get_config_spec()
+ ])
+ self.config.update(new_config)
# set addresses and ports for HTTP
- self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+ addrs = []
+ if 'listen_on' in self.config:
+ for cf in self.config['listen_on']:
+ if 'address' in cf and 'port' in cf:
+ addrs.append((cf['address'], cf['port']))
+ self.http_addrs = addrs
def open_httpd(self):
"""Opens sockets for HTTP. Iterating each HTTP address to be
@@ -208,46 +205,44 @@ class StatsHttpd:
for addr in self.http_addrs:
self.httpd.append(self._open_httpd(addr))
- def _open_httpd(self, server_address, address_family=None):
+ def _open_httpd(self, server_address):
+ httpd = None
try:
- # try IPv6 at first
- if address_family is not None:
- HttpServer.address_family = address_family
- elif socket.has_ipv6:
- HttpServer.address_family = socket.AF_INET6
+ # get address family for the server_address before
+ # creating HttpServer object. If a specified address is
+ # not numerical, gaierror may be thrown.
+ address_family = socket.getaddrinfo(
+ server_address[0], server_address[1], 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+ )[0][0]
+ HttpServer.address_family = address_family
httpd = HttpServer(
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
self.write_log)
- except (socket.gaierror, socket.error,
- OverflowError, TypeError) as err:
- # try IPv4 next
- if HttpServer.address_family == socket.AF_INET6:
- httpd = self._open_httpd(server_address, socket.AF_INET)
- else:
- raise HttpServerError(
- "Invalid address %s, port %s: %s: %s" %
- (server_address[0], server_address[1],
- err.__class__.__name__, err))
- else:
logger.info(STATHTTPD_STARTED, server_address[0],
server_address[1])
- return httpd
+ return httpd
+ except (socket.gaierror, socket.error,
+ OverflowError, TypeError) as err:
+ if httpd:
+ httpd.server_close()
+ raise HttpServerError(
+ "Invalid address %s, port %s: %s: %s" %
+ (server_address[0], server_address[1],
+ err.__class__.__name__, err))
def close_httpd(self):
"""Closes sockets for HTTP"""
- if len(self.httpd) == 0:
- return
- for ht in self.httpd:
+ while len(self.httpd)>0:
+ ht = self.httpd.pop()
logger.info(STATHTTPD_CLOSING, ht.server_address[0],
ht.server_address[1])
ht.server_close()
- self.httpd = []
def start(self):
"""Starts StatsHttpd objects to run. Waiting for client
requests by using select.select functions"""
- self.mccs.start()
self.running = True
while self.running:
try:
@@ -280,6 +275,7 @@ class StatsHttpd:
logger.info(STATHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
+ self.running = False
def get_sockets(self):
"""Returns sockets to select.select"""
@@ -296,23 +292,27 @@ class StatsHttpd:
addresses and ports to listen HTTP requests on."""
logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
new_config)
- for key in new_config.keys():
- if key not in DEFAULT_CONFIG and key != "version":
- logger.error(STATHTTPD_UNKNOWN_CONFIG_ITEM, key)
+ errors = []
+ if not self.mccs.get_module_spec().\
+ validate_config(False, new_config, errors):
return isc.config.ccsession.create_answer(
- 1, "Unknown known config: %s" % key)
+ 1, ", ".join(errors))
# backup old config
old_config = self.config.copy()
- self.close_httpd()
self.load_config(new_config)
+ # If the HTTP sockets aren't opened yet, or
+ # if new_config doesn't have 'listen_on', it returns immediately
+ if len(self.httpd) == 0 or 'listen_on' not in new_config:
+ return isc.config.ccsession.create_answer(0)
+ self.close_httpd()
try:
self.open_httpd()
except HttpServerError as err:
logger.error(STATHTTPD_SERVER_ERROR, err)
# restore old config
- self.config_handler(old_config)
- return isc.config.ccsession.create_answer(
- 1, "[b10-stats-httpd] %s" % err)
+ self.load_config(old_config)
+ self.open_httpd()
+ return isc.config.ccsession.create_answer(1, str(err))
else:
return isc.config.ccsession.create_answer(0)
@@ -328,8 +328,7 @@ class StatsHttpd:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
- return isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down.")
+ return isc.config.ccsession.create_answer(0)
else:
logger.debug(DBG_STATHTTPD_MESSAGING,
STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
@@ -341,8 +340,7 @@ class StatsHttpd:
the data which obtains from it"""
try:
seq = self.cc_session.group_sendmsg(
- isc.config.ccsession.create_command('show'),
- self.stats_module_name)
+ isc.config.ccsession.create_command('show'), 'Stats')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -357,34 +355,82 @@ class StatsHttpd:
raise StatsHttpdError("Stats module: %s" % str(value))
def get_stats_spec(self):
- """Just returns spec data"""
- return self.stats_config_spec
-
- def load_templates(self):
- """Setup the bodies of XSD and XSL documents to be responds to
- HTTP clients. Before that it also creates XML tag structures by
- using xml.etree.ElementTree.Element class and substitutes
- concrete strings with parameters embed in the string.Template
- object."""
+ """Requests the statistics data schema from the Stats daemon and
+ returns the data obtained from it"""
+ try:
+ seq = self.cc_session.group_sendmsg(
+ isc.config.ccsession.create_command('showschema'), 'Stats')
+ (answer, env) = self.cc_session.group_recvmsg(False, seq)
+ if answer:
+ (rcode, value) = isc.config.ccsession.parse_answer(answer)
+ if rcode == 0:
+ return value
+ else:
+ raise StatsHttpdError("Stats module: %s" % str(value))
+ except (isc.cc.session.SessionTimeout,
+ isc.cc.session.SessionError) as err:
+ raise StatsHttpdError("%s: %s" %
+ (err.__class__.__name__, err))
+
+ def xml_handler(self):
+ """Handler which requests to Stats daemon to obtain statistics
+ data and returns the body of XML document"""
+ xml_list=[]
+ for (mod, spec) in self.get_stats_data().items():
+ if not spec: continue
+ elem1 = xml.etree.ElementTree.Element(str(mod))
+ for (k, v) in spec.items():
+ elem2 = xml.etree.ElementTree.Element(str(k))
+ elem2.text = str(v)
+ elem1.append(elem2)
+ # The coding conversion is tricky. xml..tostring() of Python 3.2
+ # returns bytes (not string) regardless of the coding, while
+ # tostring() of Python 3.1 returns a string. To support both
+ # cases transparently, we first make sure tostring() returns
+ # bytes by specifying utf-8 and then convert the result to a
+ # plain string (code below assume it).
+ xml_list.append(
+ str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
+ encoding='us-ascii'))
+ xml_string = "".join(xml_list)
+ self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
+ xml_string=xml_string,
+ xsd_namespace=XSD_NAMESPACE,
+ xsd_url_path=XSD_URL_PATH,
+ xsl_url_path=XSL_URL_PATH)
+ assert self.xml_body is not None
+ return self.xml_body
+
+ def xsd_handler(self):
+ """Handler which just returns the body of XSD document"""
# for XSD
xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
- for item in self.get_stats_spec():
- element = xml.etree.ElementTree.Element(
- "element",
- dict( name=item["item_name"],
- type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
- minOccurs="1",
- maxOccurs="1" ),
- )
- annotation = xml.etree.ElementTree.Element("annotation")
- appinfo = xml.etree.ElementTree.Element("appinfo")
- documentation = xml.etree.ElementTree.Element("documentation")
- appinfo.text = item["item_title"]
- documentation.text = item["item_description"]
- annotation.append(appinfo)
- annotation.append(documentation)
- element.append(annotation)
- xsd_root.append(element)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ alltag = xml.etree.ElementTree.Element("all")
+ for item in spec:
+ element = xml.etree.ElementTree.Element(
+ "element",
+ dict( name=item["item_name"],
+ type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
+ minOccurs="1",
+ maxOccurs="1" ),
+ )
+ annotation = xml.etree.ElementTree.Element("annotation")
+ appinfo = xml.etree.ElementTree.Element("appinfo")
+ documentation = xml.etree.ElementTree.Element("documentation")
+ appinfo.text = item["item_title"]
+ documentation.text = item["item_description"]
+ annotation.append(appinfo)
+ annotation.append(documentation)
+ element.append(annotation)
+ alltag.append(element)
+
+ complextype = xml.etree.ElementTree.Element("complexType")
+ complextype.append(alltag)
+ mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
+ mod_element.append(complextype)
+ xsd_root.append(mod_element)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -398,25 +444,33 @@ class StatsHttpd:
xsd_namespace=XSD_NAMESPACE
)
assert self.xsd_body is not None
+ return self.xsd_body
+ def xsl_handler(self):
+ """Handler which just returns the body of XSL document"""
# for XSL
xsd_root = xml.etree.ElementTree.Element(
"xsl:template",
dict(match="*")) # started with xml:template tag
- for item in self.get_stats_spec():
- tr = xml.etree.ElementTree.Element("tr")
- td1 = xml.etree.ElementTree.Element(
- "td", { "class" : "title",
- "title" : item["item_description"] })
- td1.text = item["item_title"]
- td2 = xml.etree.ElementTree.Element("td")
- xsl_valueof = xml.etree.ElementTree.Element(
- "xsl:value-of",
- dict(select=item["item_name"]))
- td2.append(xsl_valueof)
- tr.append(td1)
- tr.append(td2)
- xsd_root.append(tr)
+ for (mod, spec) in self.get_stats_spec().items():
+ if not spec: continue
+ for item in spec:
+ tr = xml.etree.ElementTree.Element("tr")
+ td0 = xml.etree.ElementTree.Element("td")
+ td0.text = str(mod)
+ td1 = xml.etree.ElementTree.Element(
+ "td", { "class" : "title",
+ "title" : item["item_description"] })
+ td1.text = item["item_title"]
+ td2 = xml.etree.ElementTree.Element("td")
+ xsl_valueof = xml.etree.ElementTree.Element(
+ "xsl:value-of",
+ dict(select=mod+'/'+item["item_name"]))
+ td2.append(xsl_valueof)
+ tr.append(td0)
+ tr.append(td1)
+ tr.append(td2)
+ xsd_root.append(tr)
# The coding conversion is tricky. xml..tostring() of Python 3.2
# returns bytes (not string) regardless of the coding, while
# tostring() of Python 3.1 returns a string. To support both
@@ -429,47 +483,15 @@ class StatsHttpd:
xsl_string=xsl_string,
xsd_namespace=XSD_NAMESPACE)
assert self.xsl_body is not None
-
- def xml_handler(self):
- """Handler which requests to Stats daemon to obtain statistics
- data and returns the body of XML document"""
- xml_list=[]
- for (k, v) in self.get_stats_data().items():
- (k, v) = (str(k), str(v))
- elem = xml.etree.ElementTree.Element(k)
- elem.text = v
- # The coding conversion is tricky. xml..tostring() of Python 3.2
- # returns bytes (not string) regardless of the coding, while
- # tostring() of Python 3.1 returns a string. To support both
- # cases transparently, we first make sure tostring() returns
- # bytes by specifying utf-8 and then convert the result to a
- # plain string (code below assume it).
- xml_list.append(
- str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
- encoding='us-ascii'))
- xml_string = "".join(xml_list)
- self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
- xml_string=xml_string,
- xsd_namespace=XSD_NAMESPACE,
- xsd_url_path=XSD_URL_PATH,
- xsl_url_path=XSL_URL_PATH)
- assert self.xml_body is not None
- return self.xml_body
-
- def xsd_handler(self):
- """Handler which just returns the body of XSD document"""
- return self.xsd_body
-
- def xsl_handler(self):
- """Handler which just returns the body of XSL document"""
return self.xsl_body
def open_template(self, file_name):
"""It opens a template file, and it loads all lines to a
string variable and returns string. Template object includes
the variable. Limitation of a file size isn't needed there."""
- lines = "".join(
- open(file_name, 'r').readlines())
+ f = open(file_name, 'r')
+ lines = "".join(f.readlines())
+ f.close()
assert lines is not None
return string.Template(lines)
@@ -491,7 +513,7 @@ if __name__ == "__main__":
logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
sys.exit(1)
except HttpServerError as hse:
- logger.fatal(STATHTTPD_START_SERVER_ERROR, hse)
+ logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
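
For reference, a sketch of the per-module nesting the reworked xml_handler() builds, using the Auth entry from the DUMMY_DATA in the tests below (values assumed):

    import xml.etree.ElementTree
    mod, items = "Auth", {"queries.tcp": 2, "queries.udp": 3}
    elem1 = xml.etree.ElementTree.Element(mod)
    for k, v in items.items():
        elem2 = xml.etree.ElementTree.Element(str(k))
        elem2.text = str(v)
        elem1.append(elem2)
    # Serialize the way stats_httpd does: force bytes via utf-8, then decode
    # the result to a plain string.
    print(str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
              encoding='us-ascii'))
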
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 9ad07cf..cfffb3a 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -28,16 +28,6 @@ control bus. A likely problem is that the message bus daemon
This debug message is printed when the stats module has received a
configuration update from the configuration manager.
-% STATS_RECEIVED_REMOVE_COMMAND received command to remove %1
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-
-% STATS_RECEIVED_RESET_COMMAND received command to reset all statistics
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
-
% STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
The stats module received a command to show all statistics that it has
collected.
@@ -72,4 +62,15 @@ installation problem, where the specification file stats.spec is
from a different version of BIND 10 than the stats module itself.
Please check your installation.
+% STATS_STARTING starting
+The stats module is now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index bb9369f..b5edc59 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,28 +1,28 @@
-SUBDIRS = isc http testdata
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+ PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
B10_FROM_SOURCE=$(abs_top_srcdir) \
+ CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 6d72dc2..e867080 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,147 +13,269 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats http server in a
+close to real environment.
+"""
+
import unittest
import os
-import http.server
-import string
-import fake_select
import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
+import isc
import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
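
The test_utils module imported here is not part of this hunk. As a rough,
hypothetical sketch of the pattern it provides (the method names mirror how
the tests below call it; the body is guessed purely for illustration and is
not the actual test_utils implementation):

    # hypothetical sketch only -- not the real test_utils.ThreadingServerManager
    import threading

    class ThreadingServerManager:
        def __init__(self, server_cls, *args):
            self.server = server_cls(*args)            # e.g. MyStats or MyStatsHttpd
            self._thread = threading.Thread(target=self.server.start)
            self._thread.daemon = True

        def run(self):
            self._thread.start()                       # serve in a background thread

        def shutdown(self):
            self.server.stop()                         # placeholder stop call
            self._thread.join()                        # wait for the thread to exit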
DUMMY_DATA = {
- "auth.queries.tcp": 10000,
- "auth.queries.udp": 12000,
- "bind10.boot_time": "2011-03-04T11:59:05Z",
- "report_time": "2011-03-04T11:59:19Z",
- "stats.boot_time": "2011-03-04T11:59:06Z",
- "stats.last_update_time": "2011-03-04T11:59:07Z",
- "stats.lname": "4d70d40a_c at host",
- "stats.start_time": "2011-03-04T11:59:06Z",
- "stats.timestamp": 1299239959.560846
+ 'Boss' : {
+ "boot_time": "2011-03-04T11:59:06Z"
+ },
+ 'Auth' : {
+ "queries.tcp": 2,
+ "queries.udp": 3
+ },
+ 'Stats' : {
+ "report_time": "2011-03-04T11:59:19Z",
+ "boot_time": "2011-03-04T11:59:06Z",
+ "last_update_time": "2011-03-04T11:59:07Z",
+ "lname": "4d70d40a_c at host",
+ "timestamp": 1299239959.560846
+ }
}
-def push_answer(stats_httpd):
- stats_httpd.cc_session.group_sendmsg(
- { 'result':
- [ 0, DUMMY_DATA ] }, "Stats")
-
-def pull_query(stats_httpd):
- (msg, env) = stats_httpd.cc_session.group_recvmsg()
- if 'result' in msg:
- (ret, arg) = isc.config.ccsession.parse_answer(msg)
- else:
- (ret, arg) = isc.config.ccsession.parse_command(msg)
- return (ret, arg, env)
+def get_availaddr(address='127.0.0.1', port=8001):
+ """returns a tuple of address and port which is available to
+ listen on the platform. The first argument is a address for
+ search. The second argument is a port for search. If a set of
+ address and port is failed on the search for the availability, the
+ port number is increased and it goes on the next trial until the
+ available set of address and port is looked up. If the port number
+ reaches over 65535, it may stop the search and raise a
+ OverflowError exception."""
+ while True:
+ for addr in socket.getaddrinfo(
+ address, port, 0,
+ socket.SOCK_STREAM, socket.IPPROTO_TCP):
+ sock = socket.socket(addr[0], socket.SOCK_STREAM)
+ try:
+ sock.bind((address, port))
+ return (address, port)
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ # This address and port are already in use; try the next
+ # port number.
+ port = port + 1
+
+def is_ipv6_enabled(address='::1', port=8001):
+ """checks IPv6 enabled on the platform. address for check is '::1'
+ and port for check is random number between 8001 and
+ 65535. Retrying is 3 times even if it fails. The built-in socket
+ module provides a 'has_ipv6' parameter, but it's not used here
+ because there may be a situation where the value is True on an
+ environment where the IPv6 config is disabled."""
+ for p in random.sample(range(port, 65535), 3):
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((address, p))
+ return True
+ except socket.error:
+ continue
+ finally:
+ if sock: sock.close()
+ return False
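
Both helpers above are used throughout the test classes below; the typical
pattern (shown here only as an illustration of their intended use) is:

    # pick a free IPv4 address/port for the server under test
    (address, port) = get_availaddr()

    # only exercise the IPv6 variants when the platform supports them
    if is_ipv6_enabled():
        (v6_address, v6_port) = get_availaddr('::1')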
class TestHttpHandler(unittest.TestCase):
"""Tests for HttpHandler class"""
-
def setUp(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertTrue(type(self.stats_httpd.httpd) is list)
- self.httpd = self.stats_httpd.httpd
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ (self.address, self.port) = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+ self.stats_httpd = self.stats_httpd_server.server
+ self.stats_httpd_server.run()
+ self.client = http.client.HTTPConnection(self.address, self.port)
+ self.client._http_vsn_str = 'HTTP/1.0\n'
+ self.client.connect()
- def test_do_GET(self):
- for ht in self.httpd:
- self._test_do_GET(ht._handler)
+ def tearDown(self):
+ self.client.close()
+ self.stats_httpd_server.shutdown()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
- def _test_do_GET(self, handler):
+ def test_do_GET(self):
+ self.assertTrue(type(self.stats_httpd.httpd) is list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
# URL is '/bind10/statistics/xml'
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- handler.do_GET()
- (ret, arg, env) = pull_query(self.stats_httpd)
- self.assertEqual(ret, "show")
- self.assertIsNone(arg)
- self.assertTrue('group' in env)
- self.assertEqual(env['group'], 'Stats')
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
- self.assertTrue(handler.response.body.find(str(v))>0)
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ self.assertTrue(root.tag.find('stats_data') > 0)
+ for (k,v) in root.attrib.items():
+ if k.find('schemaLocation') > 0:
+ self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
+ for mod in DUMMY_DATA:
+ for (item, value) in DUMMY_DATA[mod].items():
+ self.assertIsNotNone(root.find(mod + '/' + item))
# URL is '/bind10/statistics/xsd'
- handler.path = stats_httpd.XSD_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+ tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
+ xsdpath = '/'.join(tags)
+ self.assertTrue(root.tag.find('schema') > 0)
+ self.assertTrue(hasattr(root, 'attrib'))
+ self.assertTrue('targetNamespace' in root.attrib)
+ self.assertEqual(root.attrib['targetNamespace'],
+ stats_httpd.XSD_NAMESPACE)
+ for elm in root.findall(xsdpath):
+ self.assertIsNotNone(elm.attrib['name'])
+ self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
# URL is '/bind10/statistics/xsl'
- handler.path = stats_httpd.XSL_URL_PATH
- handler.do_GET()
- self.assertEqual(handler.response.code, 200)
- self.assertEqual(handler.response.headers["Content-type"], "text/xml")
- self.assertTrue(handler.response.headers["Content-Length"] > 0)
- self.assertTrue(handler.response.wrote_headers)
- self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
- for (k, v) in DUMMY_DATA.items():
- self.assertTrue(handler.response.body.find(str(k))>0)
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.getheader("Content-type"), "text/xml")
+ self.assertTrue(int(response.getheader("Content-Length")) > 0)
+ self.assertEqual(response.status, 200)
+ root = xml.etree.ElementTree.parse(response).getroot()
+ url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+ url_xhtml = '{http://www.w3.org/1999/xhtml}'
+ xslpath = url_trans + 'template/' + url_xhtml + 'tr'
+ self.assertEqual(root.tag, url_trans + 'stylesheet')
+ for tr in root.findall(xslpath):
+ tds = tr.findall(url_xhtml + 'td')
+ self.assertIsNotNone(tds)
+ self.assertEqual(type(tds), list)
+ self.assertTrue(len(tds) > 2)
+ self.assertTrue(hasattr(tds[0], 'text'))
+ self.assertTrue(tds[0].text in DUMMY_DATA)
+ valueof = tds[2].find(url_trans + 'value-of')
+ self.assertIsNotNone(valueof)
+ self.assertTrue(hasattr(valueof, 'attrib'))
+ self.assertIsNotNone(valueof.attrib)
+ self.assertTrue('select' in valueof.attrib)
+ self.assertTrue(valueof.attrib['select'] in \
+ [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
# 302 redirect
- handler.path = '/'
- handler.headers = {'Host': 'my.host.domain'}
- handler.do_GET()
- self.assertEqual(handler.response.code, 302)
- self.assertEqual(handler.response.headers["Location"],
- "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
+ self.client._http_vsn_str = 'HTTP/1.1'
+ self.client.putrequest('GET', '/')
+ self.client.putheader('Host', self.address)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'),
+ "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
# 404 NotFound
- handler.path = '/path/to/foo/bar'
- handler.headers = {}
- handler.do_GET()
- self.assertEqual(handler.response.code, 404)
-
- # failure case(connection with Stats is down)
- handler.path = stats_httpd.XML_URL_PATH
- push_answer(self.stats_httpd)
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = True
- handler.do_GET()
- self.stats_httpd.cc_session._socket._closed = False
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
-
- # failure case(Stats module returns err)
- handler.path = stats_httpd.XML_URL_PATH
- self.stats_httpd.cc_session.group_sendmsg(
- { 'result': [ 1, "I have an error." ] }, "Stats")
- self.assertFalse(self.stats_httpd.cc_session._socket._closed)
- self.stats_httpd.cc_session._socket._closed = False
- handler.do_GET()
- self.assertEqual(handler.response.code, 500)
- self.stats_httpd.cc_session._clear_queues()
+ self.client._http_vsn_str = 'HTTP/1.0'
+ self.client.putrequest('GET', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
+
+
+ def test_do_GET_failed1(self):
+ # checks status
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ # failure case(Stats is down)
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+ self.assertFalse(self.stats.running)
+ self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ def test_do_GET_failed2(self):
+ # failure case(Stats replies an error)
+ self.stats.mccs.set_command_handler(
+ lambda cmd, args: \
+ isc.config.ccsession.create_answer(1, "I have an error.")
+ )
+
+ # request XML
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSD
+ self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
+
+ # request XSL
+ self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 500)
def test_do_HEAD(self):
- for ht in self.httpd:
- self._test_do_HEAD(ht._handler)
+ self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 200)
- def _test_do_HEAD(self, handler):
- handler.path = '/path/to/foo/bar'
- handler.do_HEAD()
- self.assertEqual(handler.response.code, 404)
+ self.client.putrequest('HEAD', '/path/to/foo/bar')
+ self.client.endheaders()
+ response = self.client.getresponse()
+ self.assertEqual(response.status, 404)
class TestHttpServerError(unittest.TestCase):
"""Tests for HttpServerError exception"""
-
def test_raises(self):
try:
raise stats_httpd.HttpServerError('Nothing')
@@ -162,17 +284,24 @@ class TestHttpServerError(unittest.TestCase):
class TestHttpServer(unittest.TestCase):
"""Tests for HttpServer class"""
+ def setUp(self):
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+
+ def tearDown(self):
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_httpserver(self):
- self.stats_httpd = stats_httpd.StatsHttpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
- self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
- self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
- self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
- self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
- self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertEqual(type(self.stats_httpd.httpd), list)
+ self.assertEqual(len(self.stats_httpd.httpd), 1)
+ for httpd in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
class TestStatsHttpdError(unittest.TestCase):
"""Tests for StatsHttpdError exception"""
@@ -187,132 +316,173 @@ class TestStatsHttpd(unittest.TestCase):
"""Tests for StatsHttpd class"""
def setUp(self):
- fake_socket._CLOSED = False
- fake_socket.has_ipv6 = True
- self.stats_httpd = stats_httpd.StatsHttpd()
+ # set the signal handler for deadlock
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats_server.run()
+ # checking IPv6 enabled on this platform
+ self.ipv6_enabled = is_ipv6_enabled()
def tearDown(self):
- self.stats_httpd.stop()
+ if hasattr(self, "stats_httpd"):
+ self.stats_httpd.stop()
+ self.stats_server.shutdown()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
def test_init(self):
- self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
- self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
- id(self.stats_httpd.mccs.get_socket()))
- for ht in self.stats_httpd.httpd:
- self.assertFalse(ht.socket._closed)
- self.assertEqual(ht.socket.fileno(), id(ht.socket))
- fake_socket._CLOSED = True
- self.assertRaises(isc.cc.session.SessionError,
- stats_httpd.StatsHttpd)
- fake_socket._CLOSED = False
+ server_address = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_address)
+ self.assertEqual(self.stats_httpd.running, False)
+ self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+ self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+ self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+ self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+ self.assertEqual(len(self.stats_httpd.config), 2)
+ self.assertTrue('listen_on' in self.stats_httpd.config)
+ self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+ self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+ self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
+
+ def test_openclose_mccs(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.close_mccs()
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.stats_httpd.open_mccs()
+ self.assertIsNotNone(self.stats_httpd.mccs)
+ self.stats_httpd.mccs = None
+ self.assertEqual(self.stats_httpd.mccs, None)
+ self.assertEqual(self.stats_httpd.close_mccs(), None)
def test_mccs(self):
- self.stats_httpd.open_mccs()
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
self.assertTrue(
- isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+ isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
self.assertTrue(
isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
- self.assertTrue(
- isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
- for cfg in self.stats_httpd.stats_config_spec:
- self.assertTrue('item_name' in cfg)
- self.assertTrue(cfg['item_name'] in DUMMY_DATA)
- self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
- def test_load_config(self):
- self.stats_httpd.load_config()
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+ statistics_spec = self.stats_httpd.get_stats_spec()
+ for mod in DUMMY_DATA:
+ self.assertTrue(mod in statistics_spec)
+ for cfg in statistics_spec[mod]:
+ self.assertTrue('item_name' in cfg)
+ self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+ self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+ self.stats_httpd.close_mccs()
+ self.assertIsNone(self.stats_httpd.mccs)
def test_httpd(self):
# dual stack (addresses is ipv4 and ipv6)
- fake_socket.has_ipv6 = True
- self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
- self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
- self.assertTrue(
- stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ if self.ipv6_enabled:
+ server_addresses = (get_availaddr('::1'), get_availaddr())
+ self.stats_httpd = MyStatsHttpd(*server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+ self.assertTrue(isinstance(ht.socket, socket.socket))
# dual stack (address is ipv6)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.stats_httpd.open_httpd()
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr('::1')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # dual/single stack (address is ipv4)
+ server_addresses = get_availaddr()
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family, socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
- # dual stack (address is ipv4)
- fake_socket.has_ipv6 = True
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
+ # any address (IPv4)
+ server_addresses = get_availaddr(address='0.0.0.0')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- # only-ipv4 single stack (force set ipv6 )
- fake_socket.has_ipv6 = False
- self.stats_httpd.http_addrs = [ ('::1', 8000) ]
- self.assertRaises(stats_httpd.HttpServerError,
- self.stats_httpd.open_httpd)
-
- # hostname
- self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
-
- self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
- self.stats_httpd.open_httpd()
- for ht in self.stats_httpd.httpd:
- self.assertTrue(isinstance(ht.socket, fake_socket.socket))
- self.stats_httpd.close_httpd()
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # any address (IPv6)
+ if self.ipv6_enabled:
+ server_addresses = get_availaddr(address='::')
+ self.stats_httpd = MyStatsHttpd(server_addresses)
+ for ht in self.stats_httpd.httpd:
+ self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+ self.assertEqual(ht.address_family,socket.AF_INET6)
+ self.assertTrue(isinstance(ht.socket, socket.socket))
+
+ # existent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+ get_availaddr(address='localhost'))
+
+ # nonexistent hostname
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
# over flow of port number
- self.stats_httpd.http_addrs = [ ('', 80000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
# negative
- self.stats_httpd.http_addrs = [ ('', -8000) ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
- # alphabet
- self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
- self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
- def test_start(self):
- self.stats_httpd.cc_session.group_sendmsg(
- { 'command': [ "shutdown" ] }, "StatsHttpd")
- self.stats_httpd.start()
- self.stats_httpd = stats_httpd.StatsHttpd()
- self.assertRaises(
- fake_select.error, self.stats_httpd.start)
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
- def test_stop(self):
- # success case
- fake_socket._CLOSED = False
- self.stats_httpd.stop()
+ # alphabet
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
+
+ # Address already in use
+ server_addresses = get_availaddr()
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+ self.stats_httpd_server.run()
+ self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+ send_shutdown("StatsHttpd")
+
+ def test_running(self):
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd = self.stats_httpd_server.server
self.assertFalse(self.stats_httpd.running)
- self.assertIsNone(self.stats_httpd.mccs)
- for ht in self.stats_httpd.httpd:
- self.assertTrue(ht.socket._closed)
- self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+ self.stats_httpd_server.run()
+ self.assertEqual(send_command("status", "StatsHttpd"),
+ (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats_httpd.running)
+ self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+ self.assertFalse(self.stats_httpd.running)
+ self.stats_httpd_server.shutdown()
+
# failure case
- self.stats_httpd.cc_session._socket._closed = False
- self.stats_httpd.open_mccs()
- self.stats_httpd.cc_session._socket._closed = True
- self.stats_httpd.stop() # No excetion raises
- self.stats_httpd.cc_session._socket._closed = False
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.cc_session.close()
+ self.assertRaises(ValueError, self.stats_httpd.start)
+
+ def test_failure_with_a_select_error (self):
+ """checks select.error is raised if the exception except
+ errno.EINTR is raised while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error('dummy error')
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.assertRaises(select.error, self.stats_httpd.start)
+ stats_httpd.select.select = orig_select
+
+ def test_nofailure_with_errno_EINTR(self):
+ """checks no exception is raised if errno.EINTR is raised
+ while it's selecting"""
+ def raise_select_except(*args):
+ raise select.error(errno.EINTR)
+ orig_select = stats_httpd.select.select
+ stats_httpd.select.select = raise_select_except
+ self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+ self.stats_httpd_server.run()
+ self.stats_httpd_server.shutdown()
+ stats_httpd.select.select = orig_select
def test_open_template(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
# successful conditions
tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
self.assertTrue(isinstance(tmpl, string.Template))
@@ -346,13 +516,13 @@ class TestStatsHttpd(unittest.TestCase):
self.stats_httpd.open_template, '/path/to/foo/bar')
def test_commands(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(self.stats_httpd.command_handler("status", None),
isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
self.stats_httpd.running = True
self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(
- 0, "Stats Httpd is shutting down."))
+ isc.config.ccsession.create_answer(0))
self.assertFalse(self.stats_httpd.running)
self.assertEqual(
self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -360,42 +530,48 @@ class TestStatsHttpd(unittest.TestCase):
1, "Unknown command: __UNKNOWN_COMMAND__"))
def test_config(self):
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
self.assertEqual(
self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
isc.config.ccsession.create_answer(
- 1, "Unknown known config: _UNKNOWN_KEY_"))
- self.assertEqual(
- self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::2",port=8000)])),
- isc.config.ccsession.create_answer(0))
- self.assertTrue("listen_on" in self.stats_httpd.config)
- for addr in self.stats_httpd.config["listen_on"]:
- self.assertTrue("address" in addr)
- self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::2")
- self.assertTrue(addr["port"] == 8000)
+ 1, "unknown item _UNKNOWN_KEY_"))
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="::1",port=80)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "::1")
- self.assertTrue(addr["port"] == 80)
-
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ if self.ipv6_enabled:
+ addresses = get_availaddr("::1")
+ self.assertEqual(
+ self.stats_httpd.config_handler(
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+ isc.config.ccsession.create_answer(0))
+ self.assertTrue("listen_on" in self.stats_httpd.config)
+ for addr in self.stats_httpd.config["listen_on"]:
+ self.assertTrue("address" in addr)
+ self.assertTrue("port" in addr)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
+
+ addresses = get_availaddr()
self.assertEqual(
self.stats_httpd.config_handler(
- dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+ dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
isc.config.ccsession.create_answer(0))
self.assertTrue("listen_on" in self.stats_httpd.config)
for addr in self.stats_httpd.config["listen_on"]:
self.assertTrue("address" in addr)
self.assertTrue("port" in addr)
- self.assertTrue(addr["address"] == "1.2.3.4")
- self.assertTrue(addr["port"] == 54321)
+ self.assertTrue(addr["address"] == addresses[0])
+ self.assertTrue(addr["port"] == addresses[1])
(ret, arg) = isc.config.ccsession.parse_answer(
self.stats_httpd.config_handler(
dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
@@ -403,93 +579,103 @@ class TestStatsHttpd(unittest.TestCase):
self.assertEqual(ret, 1)
def test_xml_handler(self):
- orig_get_stats_data = stats_httpd.StatsHttpd.get_stats_data
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'foo':'bar'}
- xml_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : { 'foo':'bar' } }
+ xml_body1 = self.stats_httpd.open_template(
stats_httpd.XML_TEMPLATE_LOCATION).substitute(
- xml_string='<foo>bar</foo>',
+ xml_string='<Dummy><foo>bar</foo></Dummy>',
xsd_namespace=stats_httpd.XSD_NAMESPACE,
xsd_url_path=stats_httpd.XSD_URL_PATH,
xsl_url_path=stats_httpd.XSL_URL_PATH)
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertEqual(type(xml_body1), str)
self.assertEqual(type(xml_body2), str)
self.assertEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = lambda x: {'bar':'foo'}
- xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+ self.stats_httpd.get_stats_data = lambda: \
+ { 'Dummy' : {'bar':'foo'} }
+ xml_body2 = self.stats_httpd.xml_handler()
self.assertNotEqual(xml_body1, xml_body2)
- stats_httpd.StatsHttpd.get_stats_data = orig_get_stats_data
def test_xsd_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsd_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsd_body1 = self.stats_httpd.open_template(
stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
- xsd_string='<all>' \
+ xsd_string=\
+ '<all><element name="Dummy"><complexType><all>' \
+ '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
+ '<annotation><appinfo>Foo</appinfo>' \
+ '<documentation>foo is bar</documentation>' \
- + '</annotation></element></all>',
+ + '</annotation></element></all>' \
+ + '</complexType></element></all>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertEqual(type(xsd_body1), str)
self.assertEqual(type(xsd_body2), str)
self.assertEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsd_body2 = self.stats_httpd.xsd_handler()
self.assertNotEqual(xsd_body1, xsd_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_xsl_handler(self):
- orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "foo",
- "item_type": "string",
- "item_optional": False,
- "item_default": "bar",
- "item_description": "foo is bar",
- "item_title": "Foo"
- }]
- xsl_body1 = stats_httpd.StatsHttpd().open_template(
+ self.stats_httpd = MyStatsHttpd(get_availaddr())
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "foo",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "bar",
+ "item_description": "foo is bar",
+ "item_title": "Foo"
+ }]
+ }
+ xsl_body1 = self.stats_httpd.open_template(
stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
xsl_string='<xsl:template match="*"><tr>' \
+ + '<td>Dummy</td>' \
+ '<td class="title" title="foo is bar">Foo</td>' \
- + '<td><xsl:value-of select="foo" /></td>' \
+ + '<td><xsl:value-of select="Dummy/foo" /></td>' \
+ '</tr></xsl:template>',
xsd_namespace=stats_httpd.XSD_NAMESPACE)
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertEqual(type(xsl_body1), str)
self.assertEqual(type(xsl_body2), str)
self.assertEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
- [{
- "item_name": "bar",
- "item_type": "string",
- "item_optional": False,
- "item_default": "foo",
- "item_description": "bar is foo",
- "item_title": "bar"
- }]
- xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+ self.stats_httpd.get_stats_spec = lambda: \
+ { "Dummy" :
+ [{
+ "item_name": "bar",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "foo",
+ "item_description": "bar is foo",
+ "item_title": "bar"
+ }]
+ }
+ xsl_body2 = self.stats_httpd.xsl_handler()
self.assertNotEqual(xsl_body1, xsl_body2)
- stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
def test_for_without_B10_FROM_SOURCE(self):
# just lets it go through the code without B10_FROM_SOURCE env
@@ -500,8 +686,6 @@ class TestStatsHttpd(unittest.TestCase):
imp.reload(stats_httpd)
os.environ["B10_FROM_SOURCE"] = tmppath
imp.reload(stats_httpd)
- stats_httpd.socket = fake_socket
- stats_httpd.select = fake_select
if __name__ == "__main__":
unittest.main()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 2fb4ab5..3813c7e 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,650 +13,593 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats module in a close
+to real environment.
+"""
+
+import unittest
import os
-import sys
+import threading
+import io
import time
-import unittest
import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-if "B10_FROM_SOURCE" in os.environ:
- TEST_SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
- "/src/bin/stats/tests/testdata/stats_test.spec"
-else:
- TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilties(unittest.TestCase):
+ items = [
+ { 'item_name': 'test_int1', 'item_type': 'integer', 'item_default': 12345 },
+ { 'item_name': 'test_real1', 'item_type': 'real', 'item_default': 12345.6789 },
+ { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True },
+ { 'item_name': 'test_str1', 'item_type': 'string', 'item_default': 'ABCD' },
+ { 'item_name': 'test_list1', 'item_type': 'list', 'item_default': [1,2,3],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map1', 'item_type': 'map', 'item_default': {'a':1,'b':2,'c':3},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'integer'},
+ { 'item_name': 'b', 'item_type': 'integer'},
+ { 'item_name': 'c', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_int2', 'item_type': 'integer' },
+ { 'item_name': 'test_real2', 'item_type': 'real' },
+ { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+ { 'item_name': 'test_str2', 'item_type': 'string' },
+ { 'item_name': 'test_list2', 'item_type': 'list',
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'integer' } },
+ { 'item_name': 'test_map2', 'item_type': 'map',
+ 'map_item_spec' : [ { 'item_name': 'A', 'item_type': 'integer'},
+ { 'item_name': 'B', 'item_type': 'integer'},
+ { 'item_name': 'C', 'item_type': 'integer'} ] },
+ { 'item_name': 'test_none', 'item_type': 'none' },
+ { 'item_name': 'test_list3', 'item_type': 'list', 'item_default': ["one","two","three"],
+ 'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+ { 'item_name': 'test_map3', 'item_type': 'map', 'item_default': {'a':'one','b':'two','c':'three'},
+ 'map_item_spec' : [ { 'item_name': 'a', 'item_type': 'string'},
+ { 'item_name': 'b', 'item_type': 'string'},
+ { 'item_name': 'c', 'item_type': 'string'} ] }
+ ]
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- self.stats_data = {
- 'report_time' : get_datetime(),
- 'bind10.boot_time' : "1970-01-01T00:00:00Z",
- 'stats.timestamp' : get_timestamp(),
- 'stats.lname' : self.session.lname,
- 'auth.queries.tcp': 0,
- 'auth.queries.udp': 0,
- "stats.boot_time": get_datetime(),
- "stats.start_time": get_datetime(),
- "stats.last_update_time": get_datetime()
- }
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertEqual(len(self.session.old_message_queue), 1)
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
-
- def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
- self.session.close()
-
- def test_local_func(self):
- """
- Test for local function
-
- """
- # test for result_ok
- self.assertEqual(type(result_ok()), dict)
- self.assertEqual(result_ok(), {'result': [0]})
- self.assertEqual(result_ok(1), {'result': [1]})
- self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
- self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
- self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
- self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
- # test for get_timestamp
- self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
- # test for get_datetime
- self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
- def test_show_command(self):
- """
- Test for show command
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command with arg
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
- self.assertEqual(len(self.subject.session.message_queue), 1)
- self.subject.check()
- result_data = self.subject.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
- result_data)
- self.assertEqual(len(self.subject.session.message_queue), 0)
-
- # test show command with arg which has wrong name
- self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- # ignore under 0.9 seconds
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_set_command(self):
- """
- Test for set command
-
- """
- # test set command
- self.stats_data['auth.queries.udp'] = 54321
- self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.udp': 54321 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2
- self.stats_data['auth.queries.udp'] = 0
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
- self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 2
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 54322
- self.assertEqual(self.stats_data['auth.queries.udp'], 0)
- self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'auth.queries.tcp': 54322 }
- } ] },
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command 3
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_remove_command(self):
- """
- Test for remove command
-
- """
- self.session.group_sendmsg({"command":
- [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
- self.assertFalse('bind10.boot_time' in self.stats_data)
-
- # test show command with arg
- self.session.group_sendmsg({"command":
- [ "show", {"stats_item_name": 'bind10.boot_time'}]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertFalse('bind10.boot_time' in result_data['result'][1])
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_reset_command(self):
- """
- Test for reset command
-
- """
- self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command
- self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_status_command(self):
- """
- Test for status command
-
- """
- self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(0, "I'm alive."),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_unknown_command(self):
- """
- Test for unknown command
-
- """
- self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_shutdown_command(self):
- """
- Test for shutdown command
-
- """
- self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.assertTrue(self.subject.running)
- self.subject.check()
- self.assertFalse(self.subject.running)
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
+ self.const_timestamp = 1308730448.965706
+ self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ stats.time = lambda : self.const_timestamp
+ stats.gmtime = lambda : self.const_timetuple
- def test_some_commands(self):
- """
- Test for some commands in a row
-
- """
- # test set command
- self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
- self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'bind10.boot_time' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
- result_data)
- self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 2nd
- self.stats_data['auth.queries.udp'] = 98765
- self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
- self.session.group_sendmsg({ "command": [
- "set", { 'stats_data': {
- 'auth.queries.udp':
- self.stats_data['auth.queries.udp']
- } }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({"command": [
- "show", {'stats_item_name': 'auth.queries.udp'}
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 3
- self.stats_data['auth.queries.tcp'] = 4321
- self.session.group_sendmsg({"command": [
- "set",
- {'stats_data': {'auth.queries.tcp': 4321 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check value
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
- result_data)
- self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set command 4
- self.stats_data['auth.queries.tcp'] = 67890
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test show command for all values
- self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, self.stats_data), result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands2(self):
- """
- Test for some commands in a row using list-type value
-
- """
- self.stats_data['listtype'] = [1, 2, 3]
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({ "command": [
- "set", {'stats_data': {'listtype': [1, 2, 3] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype'}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
- result_data)
- self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
- ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [
- "show", { 'stats_item_name': 'listtype' }
- ] }, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_some_commands3(self):
- """
- Test for some commands in a row using dictionary-type value
-
- """
- self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({ "command": [
- "set", {
- 'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
- }]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
- result_data)
- self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- # test set list-type value
- self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
- self.session.group_sendmsg({"command": [
- "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
- "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
- self.assertEqual(len(self.session.message_queue), 0)
-
- # check its value
- self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- result_data = self.session.get_message("Stats", None)
- self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
- result_data)
- self.assertEqual(len(self.session.message_queue), 0)
-
- def test_config_update(self):
- """
- Test for config update
-
- """
- # test show command without arg
- self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
- self.assertEqual(len(self.session.message_queue), 1)
- self.subject.check()
- self.assertEqual(result_ok(),
- self.session.get_message("Stats", None))
-
- def test_for_boss(self):
- last_queue = self.session.old_message_queue.pop()
- self.assertEqual(
- last_queue.msg, {'command': ['getstats']})
+ def test_get_spec_defaults(self):
self.assertEqual(
- last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+ stats.get_spec_defaults(self.items), {
+ 'test_int1' : 12345 ,
+ 'test_real1' : 12345.6789 ,
+ 'test_bool1' : True ,
+ 'test_str1' : 'ABCD' ,
+ 'test_list1' : [1,2,3] ,
+ 'test_map1' : {'a':1,'b':2,'c':3},
+ 'test_int2' : 0 ,
+ 'test_real2' : 0.0,
+ 'test_bool2' : False,
+ 'test_str2' : "",
+ 'test_list2' : [0],
+ 'test_map2' : { 'A' : 0, 'B' : 0, 'C' : 0 },
+ 'test_none' : None,
+ 'test_list3' : [ "one", "two", "three" ],
+ 'test_map3' : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+ self.assertEqual(stats.get_spec_defaults(None), {})
+ self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+ def test_get_timestamp(self):
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+ def test_get_datetime(self):
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertNotEqual(stats.get_datetime(
+ (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+ def setUp(self):
+ self.dummy_func = lambda *x, **y : (x, y)
+ self.dummy_args = (1,2,3)
+ self.dummy_kwargs = {'a':1,'b':2,'c':3}
+ self.cback1 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback2 = stats.Callback(
+ args=self.dummy_args,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback3 = stats.Callback(
+ command=self.dummy_func,
+ kwargs=self.dummy_kwargs
+ )
+ self.cback4 = stats.Callback(
+ command=self.dummy_func,
+ args=self.dummy_args
+ )
+
+ def test_init(self):
+ self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+ (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+ (None, self.dummy_args, self.dummy_kwargs))
+ self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+ (self.dummy_func, (), self.dummy_kwargs))
+ self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+ (self.dummy_func, self.dummy_args, {}))
+
+ def test_call(self):
+ self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+ self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+ self.assertEqual(self.cback2(), None)
+ self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+ self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+ self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+ self.assertEqual(self.cback4(), (self.dummy_args, {}))
+ self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+ self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+class TestStats(unittest.TestCase):
def setUp(self):
- self.session = Session()
- self.subject = SessionSubject(session=self.session)
- self.listener = CCSessionListener(self.subject)
- self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
- # check starting
- self.assertFalse(self.subject.running)
- self.subject.start()
- self.assertTrue(self.subject.running)
- self.assertEqual(len(self.session.message_queue), 0)
- self.assertEqual(self.module_name, 'Stats')
+ # set the signal handler to detect deadlocks in the tests
+ self.sig_handler = SignalHandler(self.fail)
+ self.base = BaseModules()
+ self.stats = stats.Stats()
+ self.const_timestamp = 1308730448.965706
+ self.const_datetime = '2011-06-22T08:14:08Z'
+ self.const_default_datetime = '1970-01-01T00:00:00Z'
def tearDown(self):
- # check closing
- self.subject.stop()
- self.assertFalse(self.subject.running)
- self.subject.detach(self.listener)
- self.listener.stop()
+ self.base.shutdown()
+ # reset the signal handler
+ self.sig_handler.reset()
+
+ def test_init(self):
+ self.assertEqual(self.stats.module_name, 'Stats')
+ self.assertFalse(self.stats.running)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_status' in self.stats.callbacks)
+ self.assertTrue('command_shutdown' in self.stats.callbacks)
+ self.assertTrue('command_show' in self.stats.callbacks)
+ self.assertTrue('command_showschema' in self.stats.callbacks)
+ self.assertTrue('command_set' in self.stats.callbacks)
+
+ def test_init_undefcmd(self):
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Stats",
+ "module_description": "Stats daemon",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "_undef_command_",
+ "command_description": "a undefined command in stats",
+ "command_args": []
+ }
+ ],
+ "statistics": []
+ }
+}
+"""
+ orig_spec_location = stats.SPECFILE_LOCATION
+ stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+ self.assertRaises(stats.StatsError, stats.Stats)
+ stats.SPECFILE_LOCATION = orig_spec_location
+
+ def test_start(self):
+ # start without err
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.assertFalse(self.stats.running)
+ self.stats_server.run()
+ self.assertEqual(send_command("status", "Stats"),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+ self.assertTrue(self.stats.running)
+ self.assertEqual(send_shutdown("Stats"), (0, None))
+ self.assertFalse(self.stats.running)
+ self.stats_server.shutdown()
+
+ # start with err
+ self.stats = stats.Stats()
+ self.stats.update_statistics_data = lambda x,**y: ['an error']
+ self.assertRaises(stats.StatsError, self.stats.start)
+
+ def test_handlers(self):
+ self.stats_server = ThreadingServerManager(MyStats)
+ self.stats = self.stats_server.server
+ self.stats_server.run()
+ # config_handler
+ self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+ isc.config.create_answer(0))
+
+ # command_handler
+ self.base.boss.server._started.wait()
+ self.base.boss.server._started.clear()
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command(
+ 'set', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'data' : { 'boot_time' : self.const_datetime } }),
+ (0, None))
+ self.assertEqual(
+ send_command(
+ 'show', 'Stats',
+ params={ 'owner' : 'Boss',
+ 'name' : 'boot_time' }),
+ (0, self.const_datetime))
+ self.assertEqual(
+ send_command('status', 'Stats'),
+ (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ (rcode, value) = send_command('show', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ self.assertTrue('boot_time' in value['Boss'])
+ self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertTrue('report_time' in value['Stats'])
+ self.assertTrue('boot_time' in value['Stats'])
+ self.assertTrue('last_update_time' in value['Stats'])
+ self.assertTrue('timestamp' in value['Stats'])
+ self.assertTrue('lname' in value['Stats'])
+ (rcode, value) = send_command('showschema', 'Stats')
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Auth' in value)
+ self.assertEqual(len(value['Stats']), 5)
+ self.assertEqual(len(value['Boss']), 1)
+ for item in value['Boss']:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+ for item in value['Stats']:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
- def test_specfile(self):
+ self.assertEqual(
+ send_command('__UNKNOWN__', 'Stats'),
+ (1, "Unknown command: '__UNKNOWN__'"))
+
+ self.stats_server.shutdown()
+
+ def test_update_modules(self):
+ self.assertEqual(len(self.stats.modules), 0)
+ self.stats.update_modules()
+ self.assertTrue('Stats' in self.stats.modules)
+ self.assertTrue('Boss' in self.stats.modules)
+ self.assertFalse('Dummy' in self.stats.modules)
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+ self.assertEqual(my_statistics_data['timestamp'], 0.0)
+ self.assertEqual(my_statistics_data['lname'], "")
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+ orig_parse_answer = stats.isc.config.ccsession.parse_answer
+ stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+ self.assertRaises(stats.StatsError, self.stats.update_modules)
+ stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+ def test_get_statistics_data(self):
+ my_statistics_data = self.stats.get_statistics_data()
+ self.assertTrue('Stats' in my_statistics_data)
+ self.assertTrue('Boss' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('report_time' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data)
+ self.assertTrue('last_update_time' in my_statistics_data)
+ self.assertTrue('timestamp' in my_statistics_data)
+ self.assertTrue('lname' in my_statistics_data)
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+ self.assertTrue('boot_time' in my_statistics_data)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+ self.assertEqual(my_statistics_data, self.const_default_datetime)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+ self.assertEqual(my_statistics_data, 0.0)
+ my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+ self.assertEqual(my_statistics_data, '')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Stats', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ owner='Foo', name='Bar')
+ self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+ name='Bar')
+
+ def test_update_statistics_data(self):
+ self.stats.update_statistics_data(owner='Stats', lname='foo@bar')
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['lname'], 'foo@bar')
+ self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+ self.assertTrue('Stats' in self.stats.statistics_data)
+ my_statistics_data = self.stats.statistics_data['Stats']
+ self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+ self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+ ['0.0 should be a string'])
+ self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+ ['unknown module name: Dummy'])
+
+ def test_commands(self):
+ # status
+ self.assertEqual(self.stats.command_status(),
+ isc.config.create_answer(
+ 0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+ # shutdown
+ self.stats.running = True
+ self.assertEqual(self.stats.command_shutdown(),
+ isc.config.create_answer(0))
+ self.assertFalse(self.stats.running)
+
+ def test_command_show(self):
+ self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+ self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_show(owner='Auth'),
+ isc.config.create_answer(
+ 0, {'queries.tcp': 0, 'queries.udp': 0}))
+ self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+ isc.config.create_answer(
+ 0, 0))
+ orig_get_timestamp = stats.get_timestamp
+ orig_get_datetime = stats.get_datetime
+ stats.get_timestamp = lambda : self.const_timestamp
+ stats.get_datetime = lambda : self.const_datetime
+ self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+ self.assertEqual(stats.get_datetime(), self.const_datetime)
+ self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+ isc.config.create_answer(0, self.const_datetime))
+ self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+ self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+ stats.get_timestamp = orig_get_timestamp
+ stats.get_datetime = orig_get_datetime
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertRaises(
+ stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+ def test_command_showschema(self):
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema())
+ self.assertEqual(rcode, 0)
+ self.assertEqual(len(value), 3)
+ self.assertTrue('Stats' in value)
+ self.assertTrue('Boss' in value)
+ self.assertTrue('Auth' in value)
+ self.assertFalse('__Dummy__' in value)
+ schema = value['Stats']
+ self.assertEqual(len(schema), 5)
+ for item in schema:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ schema = value['Boss']
+ self.assertEqual(len(schema), 1)
+ for item in schema:
+ self.assertTrue(len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ self.assertTrue('item_format' in item)
+
+ schema = value['Auth']
+ self.assertEqual(len(schema), 2)
+ for item in schema:
+ self.assertTrue(len(item) == 6)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ for item in value:
+ self.assertTrue(len(item) == 6 or len(item) == 7)
+ self.assertTrue('item_name' in item)
+ self.assertTrue('item_type' in item)
+ self.assertTrue('item_optional' in item)
+ self.assertTrue('item_default' in item)
+ self.assertTrue('item_title' in item)
+ self.assertTrue('item_description' in item)
+ if len(item) == 7:
+ self.assertTrue('item_format' in item)
+
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_showschema(owner='Stats', name='report_time'))
+ self.assertEqual(rcode, 0)
+ self.assertFalse('Stats' in value)
+ self.assertFalse('Boss' in value)
+ self.assertFalse('Auth' in value)
+ self.assertTrue(len(value) == 7)
+ self.assertTrue('item_name' in value)
+ self.assertTrue('item_type' in value)
+ self.assertTrue('item_optional' in value)
+ self.assertTrue('item_default' in value)
+ self.assertTrue('item_title' in value)
+ self.assertTrue('item_description' in value)
+ self.assertTrue('item_format' in value)
+ self.assertEqual(value['item_name'], 'report_time')
+ self.assertEqual(value['item_format'], 'date-time')
+
+ self.assertEqual(self.stats.command_showschema(owner='Foo'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: None"))
+ self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Foo, name: bar"))
+ self.assertEqual(self.stats.command_showschema(owner='Auth'),
+ isc.config.create_answer(
+ 0, [{
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ },
+ {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+ "item_name": "queries.udp",
+ "item_optional": False,
+ "item_title": "Queries UDP",
+ "item_type": "integer"
+ }]))
+ self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+ isc.config.create_answer(
+ 0, {
+ "item_default": 0,
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+ "item_name": "queries.tcp",
+ "item_optional": False,
+ "item_title": "Queries TCP",
+ "item_type": "integer"
+ }))
+
+ self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+ isc.config.create_answer(
+ 1, "specified arguments are incorrect: owner: Stats, name: bar"))
+ self.assertEqual(self.stats.command_showschema(name='bar'),
+ isc.config.create_answer(
+ 1, "module name is not specified"))
+
+ def test_command_set(self):
+ orig_get_datetime = stats.get_datetime
+ stats.get_datetime = lambda : self.const_datetime
+ (rcode, value) = isc.config.ccsession.parse_answer(
+ self.stats.command_set(owner='Boss',
+ data={ 'boot_time' : self.const_datetime }))
+ stats.get_datetime = orig_get_datetime
+ self.assertEqual(rcode, 0)
+ self.assertTrue(value is None)
+ self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+ self.const_datetime)
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : 'foo@bar' }),
+ isc.config.create_answer(0, None))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [] } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: unknown item lname"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name } )
+ self.assertEqual(self.stats.command_set(owner='Stats',
+ data={ 'lname' : '_foo_@_bar_' }),
+ isc.config.create_answer(
+ 1,
+ "errors while setting statistics data: No statistics specification"))
+ self.stats.statistics_data['Stats'] = {}
+ self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+ { "module_name": self.stats.module_name,
+ "statistics": [
+ {
+ "item_name": "dummy",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "",
+ "item_title": "Local Name",
+ "item_description": "brabra"
+ } ] } )
+ self.assertRaises(stats.StatsError,
+ self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+ def test_osenv(self):
"""
- Test for specfile
-
+ test for the environment variable "B10_FROM_SOURCE",
+ which is set in the Makefile
"""
- if "B10_FROM_SOURCE" in os.environ:
- self.assertEqual(stats.SPECFILE_LOCATION,
+ # test case having B10_FROM_SOURCE
+ self.assertTrue("B10_FROM_SOURCE" in os.environ)
+ self.assertEqual(stats.SPECFILE_LOCATION, \
os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats.spec")
- self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
- os.environ["B10_FROM_SOURCE"] + os.sep + \
- "src" + os.sep + "bin" + os.sep + "stats" + \
- os.sep + "stats-schema.spec")
+ "src" + os.sep + "bin" + os.sep + "stats" + \
+ os.sep + "stats.spec")
+ # test case not having B10_FROM_SOURCE
+ path = os.environ["B10_FROM_SOURCE"]
+ os.environ.pop("B10_FROM_SOURCE")
+ self.assertFalse("B10_FROM_SOURCE" in os.environ)
+ # import stats again
+ imp.reload(stats)
+ # revert the changes
+ os.environ["B10_FROM_SOURCE"] = path
imp.reload(stats)
- # change path of SPECFILE_LOCATION
- stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
- self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
- self.subject = stats.SessionSubject(session=self.session)
- self.session = self.subject.session
- self.listener = stats.CCSessionListener(self.subject)
-
- self.assertEqual(self.listener.stats_spec, [])
- self.assertEqual(self.listener.stats_data, {})
-
- self.assertEqual(self.listener.commands_spec, [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }])
-
- def test_func_initialize_data(self):
- """
- Test for initialize_data function
-
- """
- # prepare for sample data set
- stats_spec = [
- {
- "item_name": "none_sample",
- "item_type": "null",
- "item_default": "None"
- },
- {
- "item_name": "boolean_sample",
- "item_type": "boolean",
- "item_default": True
- },
- {
- "item_name": "string_sample",
- "item_type": "string",
- "item_default": "A something"
- },
- {
- "item_name": "int_sample",
- "item_type": "integer",
- "item_default": 9999999
- },
- {
- "item_name": "real_sample",
- "item_type": "real",
- "item_default": 0.0009
- },
- {
- "item_name": "list_sample",
- "item_type": "list",
- "item_default": [0, 1, 2, 3, 4],
- "list_item_spec": []
- },
- {
- "item_name": "map_sample",
- "item_type": "map",
- "item_default": {'name':'value'},
- "map_item_spec": []
- },
- {
- "item_name": "other_sample",
- "item_type": "__unknown__",
- "item_default": "__unknown__"
- }
- ]
- # data for comparison
- stats_data = {
- 'none_sample': None,
- 'boolean_sample': True,
- 'string_sample': 'A something',
- 'int_sample': 9999999,
- 'real_sample': 0.0009,
- 'list_sample': [0, 1, 2, 3, 4],
- 'map_sample': {'name':'value'},
- 'other_sample': '__unknown__'
- }
- self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
- def test_func_main(self):
- # explicitly make failed
- self.session.close()
- stats.main(session=self.session)
- def test_osenv(self):
- """
- test for not having environ "B10_FROM_SOURCE"
- """
- if "B10_FROM_SOURCE" in os.environ:
- path = os.environ["B10_FROM_SOURCE"]
- os.environ.pop("B10_FROM_SOURCE")
- imp.reload(stats)
- os.environ["B10_FROM_SOURCE"] = path
- imp.reload(stats)
-
-def result_ok(*args):
- if args:
- return { 'result': list(args) }
- else:
- return { 'result': [ 0 ] }
+def test_main():
+ unittest.main()
if __name__ == "__main__":
- unittest.main()
+ test_main()
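
For reference, the TestCallback assertions above completely pin down the
behaviour expected of stats.Callback: a stored command plus default
positional and keyword arguments, where arguments supplied at call time
replace the stored defaults and a missing command makes the call a no-op.
A minimal sketch consistent with those assertions (illustrative only, not
the actual implementation in stats.py):

    class Callback:
        """Wrap a callable with default args/kwargs; arguments supplied
        at call time replace the stored defaults."""
        def __init__(self, command=None, args=None, kwargs=None):
            self.command = command
            self.args = args if args is not None else ()
            self.kwargs = kwargs if kwargs is not None else {}

        def __call__(self, *args, **kwargs):
            if not args:
                args = self.args
            if not kwargs:
                kwargs = self.kwargs
            if self.command is not None:
                return self.command(*args, **kwargs)

With dummy_func = lambda *x, **y: (x, y), Callback(command=dummy_func,
args=(1, 2, 3))(a=100) returns ((1, 2, 3), {'a': 100}), matching the
expectations exercised in test_call.
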
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
- pass
-
-def select(rlst, wlst, xlst, timeout):
- if type(timeout) != int and type(timeout) != float:
- raise TypeError("Error: %s must be integer or float"
- % timeout.__class__.__name__)
- for s in rlst + wlst + xlst:
- if type(s) != fake_socket.socket:
- raise TypeError("Error: %s must be a dummy socket"
- % s.__class__.__name__)
- s._called = s._called + 1
- if s._called > 3:
- raise error("Something is happened!")
- elif s._called > 2:
- raise error(errno.EINTR)
- return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
- pass
-
-class error(Exception):
- pass
-
-class socket:
-
- def __init__(self, family=None):
- if family is None:
- self.address_family = _ADDRFAMILY
- else:
- self.address_family = family
- self._closed = _CLOSED
- if self._closed:
- raise error('socket is already closed!')
- self._called = 0
-
- def close(self):
- self._closed = True
-
- def fileno(self):
- return id(self)
-
- def bind(self, server_class):
- (self.server_address, self.server_port) = server_class
- if self.address_family not in set([AF_INET, AF_INET6]):
- raise error("Address family not supported by protocol: %s" % self.address_family)
- if self.address_family == AF_INET6 and not has_ipv6:
- raise error("Address family not supported in this machine: %s has_ipv6: %s"
- % (self.address_family, str(has_ipv6)))
- if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
- raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
- if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
- raise error("Cannot assign requested address : %s" % str(self.server_address))
- if type(self.server_port) is not int:
- raise TypeError("an integer is required: %s" % str(self.server_port))
- if self.server_port < 0 or self.server_port > 65535:
- raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
- """
- This is a dummy time() method against time.time()
- """
- # return float constant value
- return _TEST_TIME_SECS
-
-def gmtime():
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- # always return nothing
- return None
-
-def strftime(*arg):
- """
- This is a dummy gmtime() method against time.gmtime()
- """
- return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 79263a9..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
- def __init__(self, path):
- self.path = path
- self.headers={}
- self.log = ""
-
- def _write_log(self, msg):
- self.log = self.log + msg
-
-class HTTPServer:
- """
- A mock-up class of http.server.HTTPServer
- """
- address_family = fake_socket.AF_INET
- def __init__(self, server_class, handler_class):
- self.socket = fake_socket.socket(self.address_family)
- self.server_class = server_class
- self.socket.bind(self.server_class)
- self._handler = handler_class(None, None, self)
-
- def handle_request(self):
- pass
-
- def server_close(self):
- self.socket.close()
-
-class BaseHTTPRequestHandler:
- """
- A mock-up class of http.server.BaseHTTPRequestHandler
- """
-
- def __init__(self, request, client_address, server):
- self.path = "/path/to"
- self.headers = {}
- self.server = server
- self.response = DummyHttpResponse(path=self.path)
- self.response.write = self._write
- self.wfile = self.response
-
- def send_response(self, code=0):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
-
- def send_header(self, key, value):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.headers[key] = value
-
- def end_headers(self):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.wrote_headers = True
-
- def send_error(self, code, message=None):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.code = code
- self.response.body = message
-
- def address_string(self):
- return 'dummyhost'
-
- def log_date_time_string(self):
- return '[DD/MM/YYYY HH:MI:SS]'
-
- def _write(self, obj):
- if self.path != self.response.path:
- self.response = DummyHttpResponse(path=self.path)
- self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index bdfa1eb..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = cc config util log log_messages
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index 67323b5..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e18a695..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc@xxxx'
-
-class Queue():
- def __init__(self, msg=None, env={}):
- self.msg = msg
- self.env = env
-
- def dump(self):
- return { 'msg': self.msg, 'env': self.env }
-
-class SessionError(Exception):
- pass
-
-class SessionTimeout(Exception):
- pass
-
-class Session:
- def __init__(self, socket_file=None, verbose=False):
- self._lname = _TEST_LNAME
- self.message_queue = []
- self.old_message_queue = []
- try:
- self._socket = fake_socket.socket()
- except fake_socket.error as se:
- raise SessionError(se)
- self.verbose = verbose
-
- @property
- def lname(self):
- return self._lname
-
- def close(self):
- self._socket.close()
-
- def _clear_queues(self):
- while len(self.message_queue) > 0:
- self.dequeue()
-
- def _next_sequence(self, que=None):
- return len(self.message_queue)
-
- def enqueue(self, msg=None, env={}):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- seq = self._next_sequence()
- env.update({"seq": 0}) # fixed here
- que = Queue(msg=msg, env=env)
- self.message_queue.append(que)
- if self.verbose:
- sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
- return seq
-
- def dequeue(self):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = None
- try:
- que = self.message_queue.pop(0) # always pop at index 0
- self.old_message_queue.append(que)
- except IndexError:
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
- return que
-
- def get_queue(self, seq=None):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- if seq is None:
- seq = len(self.message_queue) - 1
- que = None
- try:
- que = self.message_queue[seq]
- except IndexError:
- raise IndexError
- que = Queue()
- if self.verbose:
- sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
- return que
-
- def group_sendmsg(self, msg, group, instance="*", to="*"):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": to,
- "group": group,
- "instance": instance })
-
- def group_recvmsg(self, nonblock=True, seq=0):
- que = self.dequeue()
- if que.msg != None:
- cmd = que.msg.get("command")
- if cmd and cmd[0] == 'getstats':
- # Create answer for command 'getstats'
- retdata = { "stats_data": {
- 'bind10.boot_time' : "1970-01-01T00:00:00Z"
- }}
- return {'result': [0, retdata]}, que.env
- return que.msg, que.env
-
- def group_reply(self, routing, msg):
- return self.enqueue(msg=msg, env={
- "type": "send",
- "from": self._lname,
- "to": routing["from"],
- "group": routing["group"],
- "instance": routing["instance"],
- "reply": routing["seq"] })
-
- def get_message(self, group, to='*'):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
- que = Queue()
- for q in self.message_queue:
- if q.env['group'] == group:
- self.message_queue.remove(q)
- self.old_message_queue.append(q)
- que = q
- if self.verbose:
- sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
- return q.msg
-
- def group_subscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
-
- def group_unsubscribe(self, group, instance = "*"):
- if self._socket._closed:
- raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index ffbecda..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index 50f7c1b..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-import time
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
- assert 'result' in msg
- try:
- return msg['result'][0], msg['result'][1]
- except IndexError:
- return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
- if arg is None:
- return { 'result': [ rcode ] }
- else:
- return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
- assert 'command' in msg
- try:
- return msg['command'][0], msg['command'][1]
- except IndexError:
- return msg['command'][0], None
-
-def create_command(command_name, params = None):
- if params is None:
- return {"command": [command_name]}
- else:
- return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
- try:
- file = open(spec_file)
- json_str = file.read()
- module_spec = json.loads(json_str)
- file.close()
- return ModuleSpec(module_spec['module_spec'], check)
- except IOError as ioe:
- raise ModuleSpecError("JSON read error: " + str(ioe))
- except ValueError as ve:
- raise ModuleSpecError("JSON parse error: " + str(ve))
- except KeyError as err:
- raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
- pass
-
-class ModuleSpec:
- def __init__(self, module_spec, check = True):
- # check only confi_data for testing
- if check and "config_data" in module_spec:
- _check_config_spec(module_spec["config_data"])
- self._module_spec = module_spec
-
- def get_config_spec(self):
- return self._module_spec['config_data']
-
- def get_commands_spec(self):
- return self._module_spec['commands']
-
- def get_module_name(self):
- return self._module_spec['module_name']
-
-def _check_config_spec(config_data):
- # config data is a list of items represented by dicts that contain
- # things like "item_name", depending on the type they can have
- # specific subitems
- """Checks a list that contains the configuration part of the
- specification. Raises a ModuleSpecError if there is a
- problem."""
- if type(config_data) != list:
- raise ModuleSpecError("config_data is of type " + str(type(config_data)) + ", not a list of items")
- for config_item in config_data:
- _check_item_spec(config_item)
-
-def _check_item_spec(config_item):
- """Checks the dict that defines one config item
- (i.e. containing "item_name", "item_type", etc.
- Raises a ModuleSpecError if there is an error"""
- if type(config_item) != dict:
- raise ModuleSpecError("item spec not a dict")
- if "item_name" not in config_item:
- raise ModuleSpecError("no item_name in config item")
- if type(config_item["item_name"]) != str:
- raise ModuleSpecError("item_name is not a string: " + str(config_item["item_name"]))
- item_name = config_item["item_name"]
- if "item_type" not in config_item:
- raise ModuleSpecError("no item_type in config item")
- item_type = config_item["item_type"]
- if type(item_type) != str:
- raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
- if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
- raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
- if "item_optional" in config_item:
- if type(config_item["item_optional"]) != bool:
- raise ModuleSpecError("item_default in " + item_name + " is not a boolean")
- if not config_item["item_optional"] and "item_default" not in config_item:
- raise ModuleSpecError("no default value for non-optional item " + item_name)
- else:
- raise ModuleSpecError("item_optional not in item " + item_name)
- if "item_default" in config_item:
- item_default = config_item["item_default"]
- if (item_type == "integer" and type(item_default) != int) or \
- (item_type == "real" and type(item_default) != float) or \
- (item_type == "boolean" and type(item_default) != bool) or \
- (item_type == "string" and type(item_default) != str) or \
- (item_type == "list" and type(item_default) != list) or \
- (item_type == "map" and type(item_default) != dict):
- raise ModuleSpecError("Wrong type for item_default in " + item_name)
- # TODO: once we have check_type, run the item default through that with the list|map_item_spec
- if item_type == "list":
- if "list_item_spec" not in config_item:
- raise ModuleSpecError("no list_item_spec in list item " + item_name)
- if type(config_item["list_item_spec"]) != dict:
- raise ModuleSpecError("list_item_spec in " + item_name + " is not a dict")
- _check_item_spec(config_item["list_item_spec"])
- if item_type == "map":
- if "map_item_spec" not in config_item:
- raise ModuleSpecError("no map_item_sepc in map item " + item_name)
- if type(config_item["map_item_spec"]) != list:
- raise ModuleSpecError("map_item_spec in " + item_name + " is not a list")
- for map_item in config_item["map_item_spec"]:
- if type(map_item) != dict:
- raise ModuleSpecError("map_item_spec element is not a dict")
- _check_item_spec(map_item)
- if 'item_format' in config_item and 'item_default' in config_item:
- item_format = config_item["item_format"]
- item_default = config_item["item_default"]
- if not _check_format(item_default, item_format):
- raise ModuleSpecError(
- "Wrong format for " + str(item_default) + " in " + str(item_name))
-
-def _check_format(value, format_name):
- """Check if specified value and format are correct. Return True if
- is is correct."""
- # TODO: should be added other format types if necessary
- time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
- 'date' : "%Y-%m-%d",
- 'time' : "%H:%M:%S" }
- for fmt in time_formats:
- if format_name == fmt:
- try:
- time.strptime(value, time_formats[fmt])
- return True
- except (ValueError, TypeError):
- break
- return False
-
-class ModuleCCSessionError(Exception):
- pass
-
-class DataNotFoundError(Exception):
- pass
-
-class ConfigData:
- def __init__(self, specification):
- self.specification = specification
-
- def get_value(self, identifier):
- """Returns a tuple where the first item is the value at the
- given identifier, and the second item is absolutely False
- even if the value is an unset default or not. Raises an
- DataNotFoundError if the identifier is not found in the
- specification file.
- *** NOTE ***
- There are some differences from the original method. This
- method never handles local settings like the original
- method. But these different behaviors aren't so big issues
- for a mock-up method of stats_httpd because stats_httpd
- calls this method at only first."""
- for config_map in self.get_module_spec().get_config_spec():
- if config_map['item_name'] == identifier:
- if 'item_default' in config_map:
- return config_map['item_default'], False
- raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
- def get_module_spec(self):
- return self.specification
-
-class ModuleCCSession(ConfigData):
- def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
- module_spec = module_spec_from_file(spec_file_name)
- ConfigData.__init__(self, module_spec)
- self._module_name = module_spec.get_module_name()
- self.set_config_handler(config_handler)
- self.set_command_handler(command_handler)
- if not cc_session:
- self._session = Session(verbose=True)
- else:
- self._session = cc_session
-
- def start(self):
- pass
-
- def close(self):
- self._session.close()
-
- def check_command(self, nonblock=True):
- msg, env = self._session.group_recvmsg(nonblock)
- if not msg or 'result' in msg:
- return
- cmd, arg = parse_command(msg)
- answer = None
- if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
- answer = self._config_handler(arg)
- elif env['group'] == self._module_name and self._command_handler:
- answer = self._command_handler(cmd, arg)
- if answer:
- self._session.group_reply(env, answer)
-
- def set_config_handler(self, config_handler):
- self._config_handler = config_handler
- # should we run this right now since we've changed the handler?
-
- def set_command_handler(self, command_handler):
- self._command_handler = command_handler
-
- def get_module_spec(self):
- return self.specification
-
- def get_socket(self):
- return self._session._socket
-
diff --git a/src/bin/stats/tests/isc/log/Makefile.am b/src/bin/stats/tests/isc/log/Makefile.am
deleted file mode 100644
index 457b9de..0000000
--- a/src/bin/stats/tests/isc/log/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log/__init__.py b/src/bin/stats/tests/isc/log/__init__.py
deleted file mode 100644
index 641cf79..0000000
--- a/src/bin/stats/tests/isc/log/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
- loglibdir = os.path.join(base, 'isc/log/.libs')
- if os.path.exists(loglibdir):
- sys.path.insert(0, loglibdir)
-
-from log import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index 9c74354..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
- rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
- pass
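
The new test_utils.py below replaces the per-module stub packages removed
above (fake_select, fake_socket, fake_time and the isc.* mock-ups) with a
harness that runs a real msgq, config manager and mock Boss/Auth modules in
background threads. A rough sketch of how the rewritten tests above drive
that harness (illustrative only; MyStats and BaseModules are defined in the
test code and are not shown here in full):

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            # fail the test instead of hanging if a message exchange deadlocks
            self.sig_handler = SignalHandler(self.fail)
            self.base = BaseModules()          # msgq, cfgmgr, mock Boss and Auth
        def tearDown(self):
            self.base.shutdown()
            self.sig_handler.reset()
        def test_status(self):
            server = ThreadingServerManager(MyStats)
            server.run()
            # send_command returns (rcode, value) via parse_answer
            self.assertEqual(send_command("status", "Stats")[0], 0)
            server.shutdown()
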
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..da0bac4
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,364 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+# Set BIND10_MSGQ_SOCKET_FILE in the environment if it is not already set
+if 'BIND10_MSGQ_SOCKET_FILE' not in os.environ:
+ os.environ['BIND10_MSGQ_SOCKET_FILE'] = tempfile.mktemp(prefix='msgq_socket_')
+
+class SignalHandler():
+ """A signal handler class for deadlock in unittest"""
+ def __init__(self, fail_handler, timeout=20):
+ """sets a schedule in SIGARM for invoking the handler via
+ unittest.TestCase after timeout seconds (default is 20)"""
+ self.fail_handler = fail_handler
+ self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+ signal.alarm(timeout)
+
+ def reset(self):
+ """resets the schedule in SIGALRM"""
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.orig_handler)
+
+ def sig_handler(self, signal, frame):
+ """envokes unittest.TestCase.fail as a signal handler"""
+ self.fail_handler("A deadlock might be detected")
+
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+ if session is not None:
+ cc_session = session
+ else:
+ cc_session = isc.cc.Session()
+ if timeout is not None:
+ orig_timeout = cc_session.get_timeout()
+ cc_session.set_timeout(timeout * 1000)
+ command = isc.config.ccsession.create_command(command_name, params)
+ seq = cc_session.group_sendmsg(command, module_name)
+ try:
+ (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+ if answer:
+ return isc.config.ccsession.parse_answer(answer)
+ except isc.cc.SessionTimeout:
+ pass
+ finally:
+ if timeout is not None:
+ cc_session.set_timeout(orig_timeout)
+ if session is None:
+ cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+ return send_command("shutdown", module_name, **kwargs)
+
+class ThreadingServerManager:
+ def __init__(self, server, *args, **kwargs):
+ self.server = server(*args, **kwargs)
+ self.server_name = server.__name__
+ self.server._thread = threading.Thread(
+ name=self.server_name, target=self.server.run)
+ self.server._thread.daemon = True
+
+ def run(self):
+ self.server._thread.start()
+ self.server._started.wait()
+ self.server._started.clear()
+
+ def shutdown(self):
+ self.server.shutdown()
+ self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+ """Dummy for sys"""
+ class dummy_io:
+ write = do_nothing
+ stdout = stderr = dummy_io()
+
+class MockMsgq:
+ def __init__(self):
+ self._started = threading.Event()
+ # suppress output to stdout and stderr
+ msgq.sys = dummy_sys()
+ msgq.print = do_nothing
+ self.msgq = msgq.MsgQ(verbose=False)
+ result = self.msgq.setup()
+ if result:
+ sys.exit("Error on Msgq startup: %s" % result)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.msgq.run()
+ except Exception:
+ pass
+ finally:
+ # explicitly shut down the socket of the msgq before
+ # shutting down the msgq
+ self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+ self.msgq.shutdown()
+
+ def shutdown(self):
+ # do nothing to avoid shutting down the msgq twice
+ pass
+
+class MockCfgmgr:
+ def __init__(self):
+ self._started = threading.Event()
+ self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+ os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+ self.cfgmgr.read_config()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.cfgmgr.run()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.cfgmgr.running = False
+
+class MockBoss:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Boss",
+ "module_description": "Mock Master process",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+"""
+ _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self._started.set()
+ self.got_command_name = command
+ params = { "owner": "Boss",
+ "data": {
+ 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+ }
+ }
+ if command == 'sendstats':
+ send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(0)
+ elif command == 'getstats':
+ return isc.config.create_answer(0, params)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MockAuth:
+ spec_str = """\
+{
+ "module_spec": {
+ "module_name": "Auth",
+ "module_description": "Mock Authoritative service",
+ "config_data": [],
+ "commands": [
+ {
+ "command_name": "sendstats",
+ "command_description": "Send data to a statistics module at once",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "queries.tcp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries TCP",
+ "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+ },
+ {
+ "item_name": "queries.udp",
+ "item_type": "integer",
+ "item_optional": false,
+ "item_default": 0,
+ "item_title": "Queries UDP",
+ "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+ }
+ ]
+ }
+}
+"""
+ def __init__(self):
+ self._started = threading.Event()
+ self.running = False
+ self.spec_file = io.StringIO(self.spec_str)
+ # create ModuleCCSession object
+ self.mccs = isc.config.ModuleCCSession(
+ self.spec_file,
+ self.config_handler,
+ self.command_handler)
+ self.spec_file.close()
+ self.cc_session = self.mccs._session
+ self.got_command_name = ''
+ self.queries_tcp = 3
+ self.queries_udp = 2
+
+ def run(self):
+ self.mccs.start()
+ self.running = True
+ self._started.set()
+ try:
+ while self.running:
+ self.mccs.check_command(False)
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def config_handler(self, new_config):
+ return isc.config.create_answer(0)
+
+ def command_handler(self, command, *args, **kwargs):
+ self.got_command_name = command
+ if command == 'sendstats':
+ params = { "owner": "Auth",
+ "data": { 'queries.tcp': self.queries_tcp,
+ 'queries.udp': self.queries_udp } }
+ return send_command("set", "Stats", params=params, session=self.cc_session)
+ return isc.config.create_answer(1, "Unknown Command")
+
+class MyStats(stats.Stats):
+ def __init__(self):
+ self._started = threading.Event()
+ stats.Stats.__init__(self)
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+ ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+ def __init__(self, *server_address):
+ self._started = threading.Event()
+ if server_address:
+ stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+ try:
+ stats_httpd.StatsHttpd.__init__(self)
+ finally:
+ if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+ stats_httpd.SPECFILE_LOCATION.close()
+ stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+ else:
+ stats_httpd.StatsHttpd.__init__(self)
+
+ def create_specfile(self, *server_address):
+ spec_io = open(self.ORIG_SPECFILE_LOCATION)
+ try:
+ spec = json.load(spec_io)
+ spec_io.close()
+ config = spec['module_spec']['config_data']
+ for i in range(len(config)):
+ if config[i]['item_name'] == 'listen_on':
+ config[i]['item_default'] = \
+ [ dict(address=a[0], port=a[1]) for a in server_address ]
+ break
+ return io.StringIO(json.dumps(spec))
+ finally:
+ spec_io.close()
+
+ def run(self):
+ self._started.set()
+ try:
+ self.start()
+ except Exception:
+ pass
+
+ def shutdown(self):
+ self.command_handler('shutdown', None)
+
+class BaseModules:
+ def __init__(self):
+ # MockMsgq
+ self.msgq = ThreadingServerManager(MockMsgq)
+ self.msgq.run()
+ # Check whether msgq is ready. A SessionTimeout is raised here if not.
+ isc.cc.session.Session().close()
+ # MockCfgmgr
+ self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+ self.cfgmgr.run()
+ # MockBoss
+ self.boss = ThreadingServerManager(MockBoss)
+ self.boss.run()
+ # MockAuth
+ self.auth = ThreadingServerManager(MockAuth)
+ self.auth.run()
+
+ def shutdown(self):
+ # MockAuth
+ self.auth.shutdown()
+ # MockBoss
+ self.boss.shutdown()
+ # MockCfgmgr
+ self.cfgmgr.shutdown()
+ # MockMsgq
+ self.msgq.shutdown()
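(Editorial note: a rough usage sketch of the mocks above in a stats unit test; the test class and the expected answer are illustrative assumptions, not part of this commit.)

    class TestStatsExample(unittest.TestCase):   # hypothetical
        def setUp(self):
            # start mock msgq, cfgmgr, boss and auth in background threads
            self.base = BaseModules()
            # run the module under test in the same way
            self.stats_server = ThreadingServerManager(MyStats)
            self.stats_server.run()

        def tearDown(self):
            self.stats_server.shutdown()
            self.base.shutdown()

        def test_status(self):
            # talk to the running Stats instance over the mock msgq;
            # send_command() returns the parsed (rcode, value) answer
            (rcode, value) = send_command("status", "Stats")
            self.assertEqual(rcode, 0)
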
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "module_spec": {
- "module_name": "Stats",
- "module_description": "Stats daemon",
- "config_data": [],
- "commands": [
- {
- "command_name": "status",
- "command_description": "identify whether stats module is alive or not",
- "command_args": []
- },
- {
- "command_name": "the_dummy",
- "command_description": "this is for testing",
- "command_args": []
- }
- ]
- }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index 446c002..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -8,13 +8,13 @@ noinst_SCRIPTS = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
check-local:
if ENABLE_PYTHON_COVERAGE
- touch $(abs_top_srcdir)/.coverage
+ touch $(abs_top_srcdir)/.coverage
rm -f .coverage
${LN_S} $(abs_top_srcdir)/.coverage .coverage
endif
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 2f31808..3d56009 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 92bf1b0..05cce98 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -955,13 +955,20 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
else:
self.assertIsNone(zone_info.tsig_key)
+ if 'ixfr_disabled' in zone_config and\
+ zone_config.get('ixfr_disabled'):
+ self.assertTrue(zone_info.ixfr_disabled)
+ else:
+ # if not set, should default to False
+ self.assertFalse(zone_info.ixfr_disabled)
def test_command_handler_zones(self):
config1 = { 'transfers_in': 3,
'zones': [
{ 'name': 'test.example.',
'master_addr': '192.0.2.1',
- 'master_port': 53
+ 'master_port': 53,
+ 'ixfr_disabled': False
}
]}
self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
@@ -972,7 +979,8 @@ class TestXfrin(unittest.TestCase):
{ 'name': 'test.example.',
'master_addr': '192.0.2.2',
'master_port': 53,
- 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+ 'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+ 'ixfr_disabled': True
}
]}
self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 8845b42..a77a383 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -451,6 +451,7 @@ class ZoneInfo:
self.set_master_port(config_data.get('master_port'))
self.set_zone_class(config_data.get('class'))
self.set_tsig_key(config_data.get('tsig_key'))
+ self.set_ixfr_disabled(config_data.get('ixfr_disabled'))
def set_name(self, name_str):
"""Set the name for this zone given a name string.
@@ -525,6 +526,16 @@ class ZoneInfo:
errmsg = "bad TSIG key string: " + tsig_key_str
raise XfrinZoneInfoException(errmsg)
+ def set_ixfr_disabled(self, ixfr_disabled):
+ """Set ixfr_disabled. If set to False (the default), it will use
+ IXFR for incoming transfers. If set to True, it will use AXFR.
+ At this moment there is no automatic fallback"""
+ # don't care what type it is; if evaluates to true, set to True
+ if ixfr_disabled:
+ self.ixfr_disabled = True
+ else:
+ self.ixfr_disabled = False
+
def get_master_addr_info(self):
return (self.master_addr.family, socket.SOCK_STREAM,
(str(self.master_addr), self.master_port))
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index a3e62ce..bc93720 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -43,6 +43,11 @@
{ "item_name": "tsig_key",
"item_type": "string",
"item_optional": true
+ },
+ { "item_name": "ixfr_disabled",
+ "item_type": "boolean",
+ "item_optional": false,
+ "item_default": false
}
]
}
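(Editorial note: for reference, a configuration fragment equivalent to what the new xfrin test exercises; values are illustrative and xfrin_instance stands in for an Xfrin object.)

    config = {'transfers_in': 3,
              'zones': [{'name': 'example.com.',
                         'master_addr': '192.0.2.1',
                         'master_port': 53,
                         'ixfr_disabled': True}]}
    # with ixfr_disabled True the zone is transferred with AXFR; with the
    # default of False, IXFR is requested (no automatic fallback yet)
    xfrin_instance.config_handler(config)
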
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 255478a..ace8fc9 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -6,10 +6,12 @@ noinst_SCRIPTS = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
check-local:
if ENABLE_PYTHON_COVERAGE
touch $(abs_top_srcdir)/.coverage
@@ -19,6 +21,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
chmod +x $(abs_builddir)/$$pytest ; \
+ B10_FROM_BUILD=$(abs_top_builddir) \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 62c7708..85979a0 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -20,6 +20,7 @@ import unittest
import os
from isc.testutils.tsigctx_mock import MockTSIGContext
from isc.cc.session import *
+import isc.config
from pydnspp import *
from xfrout import *
import xfrout
@@ -101,20 +102,24 @@ class TestXfroutSession(unittest.TestCase):
def message_has_tsig(self, msg):
return msg.get_tsig_record() is not None
- def create_request_data_with_tsig(self):
+ def create_request_data(self, with_tsig=False):
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
msg.set_opcode(Opcode.QUERY())
msg.set_rcode(Rcode.NOERROR())
- query_question = Question(Name("example.com."), RRClass.IN(), RRType.AXFR())
+ query_question = Question(Name("example.com"), RRClass.IN(),
+ RRType.AXFR())
msg.add_question(query_question)
renderer = MessageRenderer()
- tsig_ctx = MockTSIGContext(TSIG_KEY)
- msg.to_wire(renderer, tsig_ctx)
- reply_data = renderer.get_data()
- return reply_data
+ if with_tsig:
+ tsig_ctx = MockTSIGContext(TSIG_KEY)
+ msg.to_wire(renderer, tsig_ctx)
+ else:
+ msg.to_wire(renderer)
+ request_data = renderer.get_data()
+ return request_data
def setUp(self):
self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
@@ -122,8 +127,9 @@ class TestXfroutSession(unittest.TestCase):
TSIGKeyRing(), ('127.0.0.1', 12345),
# When not testing ACLs, simply accept
isc.acl.dns.REQUEST_LOADER.load(
- [{"action": "ACCEPT"}]))
- self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
+ [{"action": "ACCEPT"}]),
+ {})
+ self.mdata = self.create_request_data(False)
self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
def test_parse_query_message(self):
@@ -131,7 +137,7 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(get_rcode.to_text(), "NOERROR")
# tsig signed query message
- request_data = self.create_request_data_with_tsig()
+ request_data = self.create_request_data(True)
# BADKEY
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -143,8 +149,9 @@ class TestXfroutSession(unittest.TestCase):
self.assertEqual(rcode.to_text(), "NOERROR")
self.assertTrue(self.xfrsess._tsig_ctx is not None)
+ def check_transfer_acl(self, acl_setter):
# ACL checks, put some ACL inside
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{
"from": "127.0.0.1",
"action": "ACCEPT"
@@ -153,7 +160,7 @@ class TestXfroutSession(unittest.TestCase):
"from": "192.0.2.1",
"action": "DROP"
}
- ])
+ ]))
# Localhost (the default in this test) is accepted
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "NOERROR")
@@ -165,6 +172,10 @@ class TestXfroutSession(unittest.TestCase):
self.xfrsess._remote = ('192.0.2.2', 12345)
rcode, msg = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
+
+ # TSIG signed request
+ request_data = self.create_request_data(True)
+
# If the TSIG check fails, it should not check ACL
# (If it checked ACL as well, it would just drop the request)
self.xfrsess._remote = ('192.0.2.1', 12345)
@@ -174,36 +185,36 @@ class TestXfroutSession(unittest.TestCase):
self.assertTrue(self.xfrsess._tsig_ctx is not None)
# ACL using TSIG: successful case
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
self.assertEqual(TSIGKeyRing.SUCCESS,
self.xfrsess._tsig_key_ring.add(TSIG_KEY))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "NOERROR")
# ACL using TSIG: key name doesn't match; should be rejected
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
self.assertEqual(rcode.to_text(), "REFUSED")
# ACL using TSIG: no TSIG; should be rejected
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
- ])
+ ]))
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
#
# ACL using IP + TSIG: both should match
#
- self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ acl_setter(isc.acl.dns.REQUEST_LOADER.load([
{"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
"action": "ACCEPT"},
{"action": "REJECT"}
- ])
+ ]))
# both matches
self.xfrsess._remote = ('192.0.2.1', 12345)
[rcode, msg] = self.xfrsess._parse_query_message(request_data)
@@ -221,6 +232,63 @@ class TestXfroutSession(unittest.TestCase):
[rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
self.assertEqual(rcode.to_text(), "REFUSED")
+ def test_transfer_acl(self):
+ # ACL checks only with the default ACL
+ def acl_setter(acl):
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl(self):
+ # ACL check with a per zone ACL + default ACL. The per zone ACL
+ # should match the queried zone, so it should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.com.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.check_transfer_acl(acl_setter)
+
+ def test_transfer_zoneacl_nomatch(self):
+ # similar to the previous one, but the per zone ACL doesn't match the
+ # query. The default should be used.
+ def acl_setter(acl):
+ zone_key = ('IN', 'example.org.')
+ self.xfrsess._zone_config[zone_key] = {}
+ self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+ isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "DROP"}])
+ self.xfrsess._acl = acl
+ self.check_transfer_acl(acl_setter)
+
+ def test_get_transfer_acl(self):
+ # set the default ACL. If there's no specific zone ACL, this one
+ # should be used.
+ self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "ACCEPT"}])
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ self.assertEqual(acl, self.xfrsess._acl)
+
+ # install a per zone config with transfer ACL for example.com. Then
+ # that ACL will be used for example.com; for others the default ACL
+ # will still be used.
+ com_acl = isc.acl.dns.REQUEST_LOADER.load([
+ {"from": "127.0.0.1", "action": "REJECT"}])
+ self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+ self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+ com_acl
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('example.com'),
+ RRClass.IN()))
+ self.assertEqual(self.xfrsess._acl,
+ self.xfrsess._get_transfer_acl(Name('example.org'),
+ RRClass.IN()))
+
+ # Name matching should be case insensitive.
+ self.assertEqual(com_acl,
+ self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+ RRClass.IN()))
+
def test_get_query_zone_name(self):
msg = self.getmsg()
self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
@@ -572,9 +640,11 @@ class TestXfroutSession(unittest.TestCase):
# and it should not have sent anything else
self.assertEqual(0, len(self.sock.sendqueue))
-class MyCCSession():
+class MyCCSession(isc.config.ConfigData):
def __init__(self):
- pass
+ module_spec = isc.config.module_spec_from_file(
+ xfrout.SPECFILE_LOCATION)
+ ConfigData.__init__(self, module_spec)
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
@@ -586,9 +656,9 @@ class MyCCSession():
class MyUnixSockServer(UnixSockServer):
def __init__(self):
self._shutdown_event = threading.Event()
- self._max_transfers_out = 10
- self._cc = MyCCSession()
self._common_init()
+ self._cc = MyCCSession()
+ self.update_config_data(self._cc.get_full_config())
class TestUnixSockServer(unittest.TestCase):
def setUp(self):
@@ -636,17 +706,17 @@ class TestUnixSockServer(unittest.TestCase):
socket.AI_NUMERICHOST)[0][4])
self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
- def check_loaded_ACL(self):
+ def check_loaded_ACL(self, acl):
context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
1234, 0, socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
socket.AI_NUMERICHOST)[0][4])
- self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+ self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
1234, 0, socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
socket.AI_NUMERICHOST)[0][4])
- self.assertEqual(isc.acl.acl.REJECT, self.unix._acl.execute(context))
+ self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
def test_update_config_data(self):
self.check_default_ACL()
@@ -671,14 +741,79 @@ class TestUnixSockServer(unittest.TestCase):
self.assertEqual(self.unix.tsig_key_ring.size(), 0)
# Load the ACL
- self.unix.update_config_data({'query_acl': [{'from': '127.0.0.1',
+ self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
'action': 'ACCEPT'}]})
- self.check_loaded_ACL()
+ self.check_loaded_ACL(self.unix._acl)
# Pass a wrong data there and check it does not replace the old one
- self.assertRaises(isc.acl.acl.LoaderError,
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'transfer_acl': ['Something bad']})
+ self.check_loaded_ACL(self.unix._acl)
+
+ def test_zone_config_data(self):
+ # By default, there's no specific zone config
+ self.assertEqual({}, self.unix._zone_config)
+
+ # Adding config for a specific zone. The config is empty unless
+ # explicitly specified.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'class': 'IN'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class can be omitted
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # zone class and name are stored in the "normalized" form: class
+ # strings are upper cased, names are lower cased.
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'EXAMPLE.com'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+ # an invalid zone class or name will result in exceptions
+ self.assertRaises(EmptyLabel,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'bad..example'}]})
+ self.assertRaises(InvalidRRClass,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com',
+ 'class': 'badclass'}]})
+
+ # Configuring a couple more zones
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com'},
+ {'origin': 'example.com',
+ 'class': 'CH'},
+ {'origin': 'example.org'}]})
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+ self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+ # Duplicate data: should be rejected with an exception
+ self.assertRaises(XfroutConfigError,
+ self.unix.update_config_data,
+ {'zone_config': [{'origin': 'example.com'},
+ {'origin': 'example.org'},
+ {'origin': 'example.com'}]})
+
+ def test_zone_config_data_with_acl(self):
+ # Similar to the previous test, but with transfer_acl config
+ self.unix.update_config_data({'zone_config':
+ [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'from': '127.0.0.1',
+ 'action': 'ACCEPT'}]}]})
+ acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+ self.check_loaded_ACL(acl)
+
+ # invalid ACL syntax will be rejected with an exception
+ self.assertRaises(XfroutConfigError,
self.unix.update_config_data,
- {'query_acl': ['Something bad']})
- self.check_loaded_ACL()
+ {'zone_config': [{'origin': 'example.com',
+ 'transfer_acl':
+ [{'action': 'BADACTION'}]}]})
def test_get_db_file(self):
self.assertEqual(self.unix.get_db_file(), "initdb.file")
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 144a1b8..8049e29 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -48,11 +48,23 @@ except ImportError as e:
# must keep running, so we warn about it and move forward.
log.error(XFROUT_IMPORT, str(e))
-from isc.acl.acl import ACCEPT, REJECT, DROP
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
from isc.acl.dns import REQUEST_LOADER
isc.util.process.rename()
+class XfroutConfigError(Exception):
+ """An exception indicating an error in updating xfrout configuration.
+
+ This exception is raised when the xfrout process encounters an error in
+ handling configuration updates. Not all syntax errors can be caught
+ at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+ validate the given configuration data itself. When it finds an error
+ it raises this exception (either directly or by converting an exception
+ from other modules) as a unified error in configuration.
+ """
+ pass
+
def init_paths():
global SPECFILE_PATH
global AUTH_SPECFILE_PATH
@@ -79,14 +91,12 @@ init_paths()
SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
VERBOSE_MODE = False
# tsig sign every N axfr packets.
TSIG_SIGN_EVERY_NTH = 96
XFROUT_MAX_MESSAGE_SIZE = 65535
-
def get_rrset_len(rrset):
"""Returns the wire length of the given RRset"""
bytes = bytearray()
@@ -96,7 +106,7 @@ def get_rrset_len(rrset):
class XfroutSession():
def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
- acl):
+ default_acl, zone_config):
self._sock_fd = sock_fd
self._request_data = request_data
self._server = server
@@ -104,7 +114,8 @@ class XfroutSession():
self._tsig_ctx = None
self._tsig_len = 0
self._remote = remote
- self._acl = acl
+ self._acl = default_acl
+ self._zone_config = zone_config
self.handle()
def create_tsig_ctx(self, tsig_record, tsig_key_ring):
@@ -140,34 +151,50 @@ class XfroutSession():
try:
msg = Message(Message.PARSE)
Message.from_wire(msg, mdata)
-
- # TSIG related checks
- rcode = self._check_request_tsig(msg, mdata)
-
- if rcode == Rcode.NOERROR():
- # ACL checks
- acl_result = self._acl.execute(
- isc.acl.dns.RequestContext(self._remote,
- msg.get_tsig_record()))
- if acl_result == DROP:
- logger.info(XFROUT_QUERY_DROPPED,
- self._get_query_zone_name(msg),
- self._get_query_zone_class(msg),
- self._remote[0], self._remote[1])
- return None, None
- elif acl_result == REJECT:
- logger.info(XFROUT_QUERY_REJECTED,
- self._get_query_zone_name(msg),
- self._get_query_zone_class(msg),
- self._remote[0], self._remote[1])
- return Rcode.REFUSED(), msg
-
- except Exception as err:
+ except Exception as err: # Exception is too broad
logger.error(XFROUT_PARSE_QUERY_ERROR, err)
return Rcode.FORMERR(), None
+ # TSIG related checks
+ rcode = self._check_request_tsig(msg, mdata)
+
+ if rcode == Rcode.NOERROR():
+ # ACL checks
+ zone_name = msg.get_question()[0].get_name()
+ zone_class = msg.get_question()[0].get_class()
+ acl = self._get_transfer_acl(zone_name, zone_class)
+ acl_result = acl.execute(
+ isc.acl.dns.RequestContext(self._remote,
+ msg.get_tsig_record()))
+ if acl_result == DROP:
+ logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return None, None
+ elif acl_result == REJECT:
+ logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
+ self._remote[0], self._remote[1])
+ return Rcode.REFUSED(), msg
+
return rcode, msg
+ def _get_transfer_acl(self, zone_name, zone_class):
+ '''Return the ACL that should be applied for a given zone.
+
+ The zone is identified by a tuple of name and RR class.
+ If a per zone configuration for the zone exists and contains
+ transfer_acl, that ACL will be used; otherwise, the default
+ ACL will be used.
+
+ '''
+ # Internally zone names are managed in lower cased label characters,
+ # so we first need to convert the name.
+ zone_name_lower = Name(zone_name.to_text(), True)
+ config_key = (zone_class.to_text(), zone_name_lower.to_text())
+ if config_key in self._zone_config and \
+ 'transfer_acl' in self._zone_config[config_key]:
+ return self._zone_config[config_key]['transfer_acl']
+ return self._acl
+
def _get_query_zone_name(self, msg):
question = msg.get_question()[0]
return question.get_name().to_text()
@@ -384,10 +411,12 @@ class XfroutSession():
self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
count_since_last_tsig_sign)
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+ ThreadingUnixStreamServer):
'''The unix domain socket server which accept xfr query sent from auth server.'''
- def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
+ def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+ cc):
self._remove_unused_sock_file(sock_file)
self._sock_file = sock_file
socketserver_mixin.NoPollMixIn.__init__(self)
@@ -395,16 +424,15 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
self._shutdown_event = shutdown_event
self._write_sock, self._read_sock = socket.socketpair()
self._common_init()
- self.update_config_data(config_data)
self._cc = cc
+ self.update_config_data(config_data)
def _common_init(self):
+ '''Initialization shared with the mock server class used for tests'''
self._lock = threading.Lock()
self._transfers_counter = 0
- # This default value will probably get overwritten by the (same)
- # default value from the spec file. This is here just to make
- # sure and to make the default value in tests consistent.
- self._acl = REQUEST_LOADER.load('[{"action": "ACCEPT"}]')
+ self._zone_config = {}
+ self._acl = None # this will be initialized in update_config_data()
def _receive_query_message(self, sock):
''' receive request message from sock'''
@@ -482,7 +510,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
if not request_data:
return
- t = threading.Thread(target = self.finish_request,
+ t = threading.Thread(target=self.finish_request,
args = (sock_fd, request_data))
if self.daemon_threads:
t.daemon = True
@@ -506,10 +534,17 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
return sock.getpeername()
def finish_request(self, sock_fd, request_data):
- '''Finish one request by instantiating RequestHandlerClass.'''
+ '''Finish one request by instantiating RequestHandlerClass.
+
+ This method creates an XfroutSession object.
+ '''
+ self._lock.acquire()
+ acl = self._acl
+ zone_config = self._zone_config
+ self._lock.release()
self.RequestHandlerClass(sock_fd, request_data, self,
self.tsig_key_ring,
- self._guess_remote(sock_fd), self._acl)
+ self._guess_remote(sock_fd), acl, zone_config)
def _remove_unused_sock_file(self, sock_file):
'''Try to remove the socket file. If the file is being used
@@ -551,16 +586,65 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
pass
def update_config_data(self, new_config):
- '''Apply the new config setting of xfrout module. '''
- logger.info(XFROUT_NEW_CONFIG)
- if 'query_acl' in new_config:
- self._acl = REQUEST_LOADER.load(new_config['query_acl'])
+ '''Apply the new config setting of xfrout module.
+
+ '''
self._lock.acquire()
- self._max_transfers_out = new_config.get('transfers_out')
- self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ try:
+ logger.info(XFROUT_NEW_CONFIG)
+ new_acl = self._acl
+ if 'transfer_acl' in new_config:
+ try:
+ new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl: ' +
+ str(e))
+
+ new_zone_config = self._zone_config
+ zconfig_data = new_config.get('zone_config')
+ if zconfig_data is not None:
+ new_zone_config = self.__create_zone_config(zconfig_data)
+
+ self._acl = new_acl
+ self._zone_config = new_zone_config
+ self._max_transfers_out = new_config.get('transfers_out')
+ self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+ except Exception as e:
+ self._lock.release()
+ raise e
self._lock.release()
logger.info(XFROUT_NEW_CONFIG_DONE)
+ def __create_zone_config(self, zone_config_list):
+ new_config = {}
+ for zconf in zone_config_list:
+ # convert the class, origin (name) pair. First build pydnspp
+ # object to reject invalid input.
+ zclass_str = zconf.get('class')
+ if zclass_str is None:
+ #zclass_str = 'IN' # temporary
+ zclass_str = self._cc.get_default_value('zone_config/class')
+ zclass = RRClass(zclass_str)
+ zorigin = Name(zconf['origin'], True)
+ config_key = (zclass.to_text(), zorigin.to_text())
+
+ # reject duplicate config
+ if config_key in new_config:
+ raise XfroutConfigError('Duplicate zone_config for ' +
+ str(zorigin) + '/' + str(zclass))
+
+ # create a new config entry, build any given (and known) config
+ new_config[config_key] = {}
+ if 'transfer_acl' in zconf:
+ try:
+ new_config[config_key]['transfer_acl'] = \
+ REQUEST_LOADER.load(zconf['transfer_acl'])
+ except LoaderError as e:
+ raise XfroutConfigError('Failed to parse transfer_acl ' +
+ 'for ' + zorigin.to_text() + '/' +
+ zclass_str + ': ' + str(e))
+ return new_config
+
def set_tsig_key_ring(self, key_list):
"""Set the tsig_key_ring , given a TSIG key string list representation. """
@@ -617,8 +701,10 @@ class XfroutServer:
def _start_xfr_query_listener(self):
'''Start a new thread to accept xfr query. '''
- self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
- self._shutdown_event, self._config_data,
+ self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+ XfroutSession,
+ self._shutdown_event,
+ self._config_data,
self._cc)
listener = threading.Thread(target=self._unix_socket_server.serve_forever)
listener.start()
@@ -726,6 +812,10 @@ if '__main__' == __name__:
logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(XFROUT_CC_SESSION_ERROR, str(e))
+ except ModuleCCSessionError as e:
+ logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+ except XfroutConfigError as e:
+ logger.error(XFROUT_CONFIG_ERROR, str(e))
except SessionTimeout as e:
logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
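(Editorial note: a small sketch of how the new XfroutConfigError surfaces when a bad ACL is pushed through update_config_data; "server" is a placeholder for a UnixSockServer instance and is not part of this commit.)

    try:
        server.update_config_data({'transfer_acl': ['not an ACL']})
    except XfroutConfigError as e:
        # the LoaderError from the ACL parser is converted into the unified
        # configuration error; the previously installed ACL stays in effect
        logger.error(XFROUT_CONFIG_ERROR, str(e))
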
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 8ecbb0b..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -51,7 +51,7 @@
}
},
{
- "item_name": "query_acl",
+ "item_name": "transfer_acl",
"item_type": "list",
"item_optional": false,
"item_default": [{"action": "ACCEPT"}],
@@ -61,6 +61,45 @@
"item_type": "any",
"item_optional": true
}
+ },
+ {
+ "item_name": "zone_config",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [],
+ "list_item_spec":
+ {
+ "item_name": "zone_config_element",
+ "item_type": "map",
+ "item_optional": true,
+ "item_default": { "origin": "" },
+ "map_item_spec": [
+ {
+ "item_name": "origin",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
+ },
+ {
+ "item_name": "class",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "IN"
+ },
+ {
+ "item_name": "transfer_acl",
+ "item_type": "list",
+ "item_optional": true,
+ "item_default": [{"action": "ACCEPT"}],
+ "list_item_spec":
+ {
+ "item_name": "acl_element",
+ "item_type": "any",
+ "item_optional": true
+ }
+ }
+ ]
+ }
}
],
"commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 121b2ad..b2e432c 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -47,6 +47,17 @@ a valid TSIG key.
There was a problem reading from the command and control channel. The
most likely cause is that the msgq daemon is not running.
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands. This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization. A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time. Details of the error are included in the log message.
+
% XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
There was a problem reading a response from another module over the
command and control channel. The most likely cause is that the
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 6e8c35b..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -7,7 +7,7 @@ CLEANFILES = initdb.file
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index a4fea30..8a4c7c1 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -3,7 +3,7 @@
debug
missingInclude
// This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:60
+unreadVariable:src/lib/dns/rdata/template.cc:61
// Intentional self assignment tests. Suppress warning about them.
selfAssignment:src/lib/dns/tests/name_unittest.cc:293
selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index 5adf150..04eee45 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config acl python xfr \
- bench asiolink asiodns nsas cache resolve testutils datasrc \
- server_common
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+ asiolink asiodns nsas cache resolve testutils datasrc \
+ server_common python
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 3a01f0d..5e193d2 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -36,6 +36,7 @@ libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libdatasrc_la_LIBADD += $(SQLITE_LIBS)
BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 2c5aaeb..e476297 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -174,105 +174,42 @@ private:
};
}
-std::pair<bool, isc::dns::RRsetPtr>
-DatabaseClient::Finder::getRRset(const isc::dns::Name& name,
- const isc::dns::RRType* type,
- bool want_cname, bool want_dname,
- bool want_ns,
- const isc::dns::Name* construct_name)
+DatabaseClient::Finder::FoundRRsets
+DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
+ bool check_ns, const string* construct_name)
{
RRsigStore sig_store;
bool records_found = false;
- isc::dns::RRsetPtr result_rrset;
+ std::map<RRType, RRsetPtr> result;
// Request the context
DatabaseAccessor::IteratorContextPtr
- context(accessor_->getRecords(name.toText(), zone_id_));
+ context(accessor_->getRecords(name, zone_id_));
// It must not return NULL, that's a bug of the implementation
if (!context) {
- isc_throw(isc::Unexpected, "Iterator context null at " +
- name.toText());
+ isc_throw(isc::Unexpected, "Iterator context null at " + name);
}
std::string columns[DatabaseAccessor::COLUMN_COUNT];
if (construct_name == NULL) {
construct_name = &name;
}
+
+ const Name construct_name_object(*construct_name);
+
+ bool seen_cname(false);
+ bool seen_ds(false);
+ bool seen_other(false);
+ bool seen_ns(false);
+
while (context->getNext(columns)) {
- if (!records_found) {
- records_found = true;
- }
+ // The domain is not empty
+ records_found = true;
try {
- const isc::dns::RRType cur_type(columns[DatabaseAccessor::
- TYPE_COLUMN]);
- const isc::dns::RRTTL cur_ttl(columns[DatabaseAccessor::
- TTL_COLUMN]);
- // Ths sigtype column was an optimization for finding the
- // relevant RRSIG RRs for a lookup. Currently this column is
- // not used in this revised datasource implementation. We
- // should either start using it again, or remove it from use
- // completely (i.e. also remove it from the schema and the
- // backend implementation).
- // Note that because we don't use it now, we also won't notice
- // it if the value is wrong (i.e. if the sigtype column
- // contains an rrtype that is different from the actual value
- // of the 'type covered' field in the RRSIG Rdata).
- //cur_sigtype(columns[SIGTYPE_COLUMN]);
-
- // Check for delegations before checking for the right type.
- // This is needed to properly delegate request for the NS
- // record itself.
- //
- // This happens with NS only, CNAME must be alone and DNAME
- // is not checked in the exact queried domain.
- if (want_ns && cur_type == isc::dns::RRType::NS()) {
- if (result_rrset &&
- result_rrset->getType() != isc::dns::RRType::NS()) {
- isc_throw(DataSourceError, "NS found together with data"
- " in non-apex domain " + name.toText());
- }
- addOrCreate(result_rrset, *construct_name, getClass(),
- cur_type, cur_ttl,
- columns[DatabaseAccessor::RDATA_COLUMN],
- *accessor_);
- } else if (type != NULL && cur_type == *type) {
- if (result_rrset &&
- result_rrset->getType() == isc::dns::RRType::CNAME()) {
- isc_throw(DataSourceError, "CNAME found but it is not "
- "the only record for " + name.toText());
- } else if (result_rrset && want_ns &&
- result_rrset->getType() == isc::dns::RRType::NS()) {
- isc_throw(DataSourceError, "NS found together with data"
- " in non-apex domain " + name.toText());
- }
- addOrCreate(result_rrset, *construct_name, getClass(),
- cur_type, cur_ttl,
- columns[DatabaseAccessor::RDATA_COLUMN],
- *accessor_);
- } else if (want_cname && cur_type == isc::dns::RRType::CNAME()) {
- // There should be no other data, so result_rrset should
- // be empty.
- if (result_rrset) {
- isc_throw(DataSourceError, "CNAME found but it is not "
- "the only record for " + name.toText());
- }
- addOrCreate(result_rrset, *construct_name, getClass(),
- cur_type, cur_ttl,
- columns[DatabaseAccessor::RDATA_COLUMN],
- *accessor_);
- } else if (want_dname && cur_type == isc::dns::RRType::DNAME()) {
- // There should be max one RR of DNAME present
- if (result_rrset &&
- result_rrset->getType() == isc::dns::RRType::DNAME()) {
- isc_throw(DataSourceError, "DNAME with multiple RRs in " +
- name.toText());
- }
- addOrCreate(result_rrset, *construct_name, getClass(),
- cur_type, cur_ttl,
- columns[DatabaseAccessor::RDATA_COLUMN],
- *accessor_);
- } else if (cur_type == isc::dns::RRType::RRSIG()) {
+ const RRType cur_type(columns[DatabaseAccessor::TYPE_COLUMN]);
+
+ if (cur_type == RRType::RRSIG()) {
// If we get signatures before we get the actual data, we
// can't know which ones to keep and which to drop...
// So we keep a separate store of any signature that may be
@@ -280,27 +217,76 @@ DatabaseClient::Finder::getRRset(const isc::dns::Name& name,
// done.
// A possible optimization here is to not store them for
// types we are certain we don't need
- sig_store.addSig(isc::dns::rdata::createRdata(cur_type,
- getClass(), columns[DatabaseAccessor::RDATA_COLUMN]));
+ sig_store.addSig(rdata::createRdata(cur_type, getClass(),
+ columns[DatabaseAccessor::RDATA_COLUMN]));
+ }
+
+ if (types.find(cur_type) != types.end()) {
+ // This type is requested, so put it into result
+ const RRTTL cur_ttl(columns[DatabaseAccessor::TTL_COLUMN]);
+ // The sigtype column was an optimization for finding the
+ // relevant RRSIG RRs for a lookup. Currently this column is
+ // not used in this revised datasource implementation. We
+ // should either start using it again, or remove it from use
+ // completely (i.e. also remove it from the schema and the
+ // backend implementation).
+ // Note that because we don't use it now, we also won't notice
+ // it if the value is wrong (i.e. if the sigtype column
+ // contains an rrtype that is different from the actual value
+ // of the 'type covered' field in the RRSIG Rdata).
+ //cur_sigtype(columns[SIGTYPE_COLUMN]);
+ addOrCreate(result[cur_type], construct_name_object,
+ getClass(), cur_type, cur_ttl,
+ columns[DatabaseAccessor::RDATA_COLUMN],
+ *accessor_);
}
- } catch (const isc::dns::InvalidRRType& irt) {
+
+ if (cur_type == RRType::CNAME()) {
+ seen_cname = true;
+ } else if (cur_type == RRType::NS()) {
+ seen_ns = true;
+ } else if (cur_type == RRType::DS()) {
+ seen_ds = true;
+ } else if (cur_type != RRType::RRSIG() &&
+ cur_type != RRType::NSEC3() &&
+ cur_type != RRType::NSEC()) {
+ // NSEC and RRSIG can coexist with anything, otherwise
+ // we've seen something that can't live together with potential
+ // CNAME or NS
+ //
+ // NSEC3 lives in a separate namespace from everything, therefore
+ // we just ignore it here for these checks as well.
+ seen_other = true;
+ }
+ } catch (const InvalidRRType&) {
isc_throw(DataSourceError, "Invalid RRType in database for " <<
name << ": " << columns[DatabaseAccessor::
TYPE_COLUMN]);
- } catch (const isc::dns::InvalidRRTTL& irttl) {
+ } catch (const InvalidRRTTL&) {
isc_throw(DataSourceError, "Invalid TTL in database for " <<
name << ": " << columns[DatabaseAccessor::
TTL_COLUMN]);
- } catch (const isc::dns::rdata::InvalidRdataText& ird) {
+ } catch (const rdata::InvalidRdataText&) {
isc_throw(DataSourceError, "Invalid rdata in database for " <<
name << ": " << columns[DatabaseAccessor::
RDATA_COLUMN]);
}
}
- if (result_rrset) {
- sig_store.appendSignatures(result_rrset);
+ if (seen_cname && (seen_other || seen_ns || seen_ds)) {
+ isc_throw(DataSourceError, "CNAME shares domain " << name <<
+ " with something else");
+ }
+ if (check_ns && seen_ns && seen_other) {
+ isc_throw(DataSourceError, "NS shares domain " << name <<
+ " with something else");
}
- return (std::pair<bool, isc::dns::RRsetPtr>(records_found, result_rrset));
+ // Add signatures to all found RRsets
+ for (std::map<RRType, RRsetPtr>::iterator i(result.begin());
+ i != result.end(); ++ i) {
+ sig_store.appendSignatures(i->second);
+ }
+
+ return (FoundRRsets(records_found, result));
}
bool
@@ -317,6 +303,92 @@ DatabaseClient::Finder::hasSubdomains(const std::string& name) {
return (context->getNext(columns));
}
+// Some manipulation with RRType sets
+namespace {
+
+// Bunch of functions to construct specific sets of RRTypes we will
+// ask from it.
+typedef std::set<RRType> WantedTypes;
+
+const WantedTypes&
+NSEC_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+DELEGATION_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::DNAME());
+ result.insert(RRType::NS());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+FINAL_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::CNAME());
+ result.insert(RRType::NS());
+ result.insert(RRType::NSEC());
+ initialized = true;
+ }
+ return (result);
+}
+
+}
+
+RRsetPtr
+DatabaseClient::Finder::findNSECCover(const Name& name) {
+ try {
+ // Which one should contain the NSEC record?
+ const Name coverName(findPreviousName(name));
+ // Get the record and copy it out
+ const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
+ coverName != getOrigin());
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ return (nci->second);
+ } else {
+ // The previous name doesn't contain NSEC.
+ // Badly signed zone or a bug?
+
+ // FIXME: Currently, if the zone is not signed, we could get
+ // here. In that case we can't really throw, but for now, we can't
+ // recognize it. So we don't throw at all, enable it once
+ // we have an is_signed flag or something.
+#if 0
+ isc_throw(DataSourceError, "No NSEC in " +
+ coverName.toText() + ", but it was "
+ "returned as previous - "
+ "accessor error? Badly signed zone?");
+#endif
+ }
+ }
+ catch (const isc::NotImplemented&) {
+ // Well, they want DNSSEC, but there is no available.
+ // So we don't provide anything.
+ LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+ arg(accessor_->getDBName()).arg(name);
+ }
+ // We didn't find it, return nothing
+ return (RRsetPtr());
+}
+
ZoneFinder::FindResult
DatabaseClient::Finder::find(const isc::dns::Name& name,
const isc::dns::RRType& type,
@@ -326,10 +398,12 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
// This variable is used to determine the difference between
// NXDOMAIN and NXRRSET
bool records_found = false;
- bool glue_ok(options & FIND_GLUE_OK);
+ bool glue_ok((options & FIND_GLUE_OK) != 0);
+ const bool dnssec_data((options & FIND_DNSSEC) != 0);
+ bool get_cover(false);
isc::dns::RRsetPtr result_rrset;
ZoneFinder::Result result_status = SUCCESS;
- std::pair<bool, isc::dns::RRsetPtr> found;
+ FoundRRsets found;
logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
.arg(accessor_->getDBName()).arg(name).arg(type);
// In case we are in GLUE_OK mode and start matching wildcards,
@@ -337,11 +411,11 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
isc::dns::RRsetPtr first_ns;
// First, do we have any kind of delegation (NS/DNAME) here?
- Name origin(getOrigin());
- size_t origin_label_count(origin.getLabelCount());
+ const Name origin(getOrigin());
+ const size_t origin_label_count(origin.getLabelCount());
// Number of labels in the last known non-empty domain
size_t last_known(origin_label_count);
- size_t current_label_count(name.getLabelCount());
+ const size_t current_label_count(name.getLabelCount());
// This is how many labels we remove to get origin
size_t remove_labels(current_label_count - origin_label_count);
@@ -349,35 +423,44 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
for (int i(remove_labels); i > 0; --i) {
Name superdomain(name.split(i));
// Look if there's NS or DNAME (but ignore the NS in origin)
- found = getRRset(superdomain, NULL, false, true,
- i != remove_labels && !glue_ok);
+ found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
+ i != remove_labels);
if (found.first) {
// It contains some RRs, so it exists.
last_known = superdomain.getLabelCount();
- // In case we are in GLUE_OK, we want to store the highest
- // encountered RRset.
- if (glue_ok && !first_ns && i != remove_labels) {
- first_ns = getRRset(superdomain, NULL, false, false,
- true).second;
- }
- }
- if (found.second) {
- // We found something redirecting somewhere else
- // (it can be only NS or DNAME here)
- result_rrset = found.second;
- if (result_rrset->getType() == isc::dns::RRType::NS()) {
+
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator dni(found.second.find(RRType::DNAME()));
+ // In case we are in GLUE_OK mode, we want to store the
+ // highest encountered NS (but not apex)
+ if (glue_ok && !first_ns && i != remove_labels &&
+ nsi != found.second.end()) {
+ first_ns = nsi->second;
+ } else if (!glue_ok && i != remove_labels &&
+ nsi != found.second.end()) {
+ // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
+ // delegation in apex
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DELEGATION).
arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = nsi->second;
result_status = DELEGATION;
- } else {
+ // No need to go lower, found
+ break;
+ } else if (dni != found.second.end()) {
+ // Very similar with DNAME
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DNAME).
arg(accessor_->getDBName()).arg(superdomain);
+ result_rrset = dni->second;
result_status = DNAME;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "DNAME at " << superdomain <<
+ " has " << result_rrset->getRdataCount() <<
+ " rdata, 1 expected");
+ }
+ break;
}
- // Don't search more
- break;
}
}
@@ -385,21 +468,37 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
// Try getting the final result and extract it
// It is special if there's a CNAME or NS, DNAME is ignored here
// And we don't consider the NS in origin
- found = getRRset(name, &type, true, false, name != origin && !glue_ok);
+
+ WantedTypes final_types(FINAL_TYPES());
+ final_types.insert(type);
+ found = getRRsets(name.toText(), final_types, name != origin);
records_found = found.first;
- result_rrset = found.second;
- if (result_rrset && name != origin && !glue_ok &&
- result_rrset->getType() == isc::dns::RRType::NS()) {
+
+ // NS records, CNAME record and Wanted Type records
+ const FoundIterator nsi(found.second.find(RRType::NS()));
+ const FoundIterator cni(found.second.find(RRType::CNAME()));
+ const FoundIterator wti(found.second.find(type));
+ if (name != origin && !glue_ok && nsi != found.second.end()) {
+ // There's a delegation at the exact node.
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
arg(accessor_->getDBName()).arg(name);
result_status = DELEGATION;
- } else if (result_rrset && type != isc::dns::RRType::CNAME() &&
- result_rrset->getType() == isc::dns::RRType::CNAME()) {
+ result_rrset = nsi->second;
+ } else if (type != isc::dns::RRType::CNAME() &&
+ cni != found.second.end()) {
+ // A CNAME here
result_status = CNAME;
- }
-
- if (!result_rrset && !records_found) {
+ result_rrset = cni->second;
+ if (result_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "CNAME with " <<
+ result_rrset->getRdataCount() <<
+ " rdata at " << name << ", expected 1");
+ }
+ } else if (wti != found.second.end()) {
+ // Just get the answer
+ result_rrset = wti->second;
+ } else if (!records_found) {
// Nothing lives here.
// But check if something lives below this
// domain and if so, pretend something is here as well.
@@ -408,23 +507,22 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
arg(accessor_->getDBName()).arg(name);
records_found = true;
+ get_cover = dnssec_data;
} else {
// It's not empty non-terminal. So check for wildcards.
// We remove labels one by one and look for the wildcard there.
// Go up to first non-empty domain.
remove_labels = current_label_count - last_known;
- Name star("*");
for (size_t i(1); i <= remove_labels; ++ i) {
// Construct the name with *
- // TODO: Once the underlying DatabaseAccessor takes
- // string, do the concatenation on strings, not
- // Names
- Name superdomain(name.split(i));
- Name wildcard(star.concatenate(superdomain));
+ const Name superdomain(name.split(i));
+ const string wildcard("*." + superdomain.toText());
+ const string construct_name(name.toText());
// TODO What do we do about DNAME here?
- found = getRRset(wildcard, &type, true, false, true,
- &name);
+ // The types are the same as with original query
+ found = getRRsets(wildcard, final_types, true,
+ &construct_name);
if (found.first) {
if (first_ns) {
// In case we are under NS, we don't
@@ -445,7 +543,42 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
// domain, but it could be empty non-terminal. In
// that case, we need to cancel the match.
records_found = true;
- result_rrset = found.second;
+ const FoundIterator
+ cni(found.second.find(RRType::CNAME()));
+ const FoundIterator
+ nsi(found.second.find(RRType::NS()));
+ const FoundIterator
+ nci(found.second.find(RRType::NSEC()));
+ const FoundIterator wti(found.second.find(type));
+ if (cni != found.second.end() &&
+ type != RRType::CNAME()) {
+ result_rrset = cni->second;
+ result_status = CNAME;
+ } else if (nsi != found.second.end()) {
+ result_rrset = nsi->second;
+ result_status = DELEGATION;
+ } else if (wti != found.second.end()) {
+ result_rrset = wti->second;
+ result_status = WILDCARD;
+ } else {
+ // NXRRSET case in the wildcard
+ result_status = WILDCARD_NXRRSET;
+ if (dnssec_data &&
+ nci != found.second.end()) {
+ // User wants a proof the wildcard doesn't
+ // contain it
+ //
+ // However, we need to get the RRset in the
+ // name of the wildcard, not the constructed
+ // one, so we walk it again
+ found = getRRsets(wildcard, NSEC_TYPES(),
+ true);
+ result_rrset =
+ found.second.find(RRType::NSEC())->
+ second;
+ }
+ }
+
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_WILDCARD).
arg(accessor_->getDBName()).arg(wildcard).
@@ -457,33 +590,63 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
arg(name).arg(superdomain);
}
break;
- } else if (hasSubdomains(wildcard.toText())) {
+ } else if (hasSubdomains(wildcard)) {
// Empty non-terminal asterisk
records_found = true;
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_WILDCARD_EMPTY).
arg(accessor_->getDBName()).arg(wildcard).
arg(name);
+ if (dnssec_data) {
+ result_rrset = findNSECCover(Name(wildcard));
+ if (result_rrset) {
+ result_status = WILDCARD_NXRRSET;
+ }
+ }
break;
}
}
+ // This is the NXDOMAIN case (nothing found anywhere). If
+ // they want DNSSEC data, try getting the NSEC record
+ if (dnssec_data && !records_found) {
+ get_cover = true;
+ }
+ }
+ } else if (dnssec_data) {
+ // This is the "usual" NXRRSET case
+ // So in case they want DNSSEC, provide the NSEC
+ // (which should be available already here)
+ result_status = NXRRSET;
+ const FoundIterator nci(found.second.find(RRType::NSEC()));
+ if (nci != found.second.end()) {
+ result_rrset = nci->second;
}
}
}
if (!result_rrset) {
- if (records_found) {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXRRSET)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXRRSET;
- } else {
- logger.debug(DBG_TRACE_DETAILED,
- DATASRC_DATABASE_FOUND_NXDOMAIN)
- .arg(accessor_->getDBName()).arg(name)
- .arg(getClass()).arg(type);
- result_status = NXDOMAIN;
+ if (result_status == SUCCESS) {
+ // Should we look for NSEC covering the name?
+ if (get_cover) {
+ result_rrset = findNSECCover(name);
+ if (result_rrset) {
+ result_status = NXDOMAIN;
+ }
+ }
+ // Something is missing here and we haven't decided yet which case it is
+ if (records_found) {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXRRSET)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXRRSET;
+ } else {
+ logger.debug(DBG_TRACE_DETAILED,
+ DATASRC_DATABASE_FOUND_NXDOMAIN)
+ .arg(accessor_->getDBName()).arg(name)
+ .arg(getClass()).arg(type);
+ result_status = NXDOMAIN;
+ }
}
} else {
logger.debug(DBG_TRACE_DETAILED,
@@ -494,6 +657,26 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
}
Name
+DatabaseClient::Finder::findPreviousName(const Name& name) const {
+ const string str(accessor_->findPreviousName(zone_id_,
+ name.reverse().toText()));
+ try {
+ return (Name(str));
+ }
+ /*
+ * To avoid having the same code many times, we just catch all the
+ * exceptions and handle them in the common code below
+ */
+ catch (const isc::dns::EmptyLabel&) {}
+ catch (const isc::dns::TooLongLabel&) {}
+ catch (const isc::dns::BadLabelType&) {}
+ catch (const isc::dns::BadEscape&) {}
+ catch (const isc::dns::TooLongName&) {}
+ catch (const isc::dns::IncompleteName&) {}
+ isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+}
+
+Name
DatabaseClient::Finder::getOrigin() const {
return (origin_);
}
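The findNSECCover() helper called above is only declared later in database.h. As a rough sketch of the pattern it is built on, the following uses nothing but the public ZoneFinder interface; the function name, the output and the error handling are illustrative assumptions, not the committed code:

    // Illustrative sketch only -- not the committed findNSECCover().
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>
    #include <exceptions/exceptions.h>
    #include <cstddef>
    #include <iostream>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    printCoveringNSEC(ZoneFinder& finder, const Name& name) {
        try {
            // The previous existing name in DNSSEC order owns the NSEC
            // that covers (proves the nonexistence of) the queried name.
            const Name cover(finder.findPreviousName(name));
            const ZoneFinder::FindResult result(
                finder.find(cover, RRType::NSEC(), NULL,
                            ZoneFinder::FIND_DNSSEC));
            if (result.rrset) {
                std::cout << result.rrset->toText();
            }
        } catch (const isc::NotImplemented&) {
            // Backend without DNSSEC support; no proof can be provided
            // (the real code logs DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED
            // and simply omits the NSEC).
            std::cout << "no NSEC available" << std::endl;
        }
    }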
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 82918ac..8295779 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -28,6 +28,9 @@
#include <dns/name.h>
#include <exceptions/exceptions.h>
+#include <map>
+#include <set>
+
namespace isc {
namespace datasrc {
@@ -471,6 +474,34 @@ public:
* \return the name of the database
*/
virtual const std::string& getDBName() const = 0;
+
+ /**
+ * \brief It returns the previous name in DNSSEC order.
+ *
+ * This is used in DatabaseClient::findPreviousName and does more
+ * or less the real work, except for working on strings.
+ *
+ * \param rname The name to ask for previous of, in reversed form.
+ * We use the reversed form (see isc::dns::Name::reverse),
+ * because then the case insensitive order of string representation
+ * and the DNSSEC order correspond (eg. org.example.a is followed
+ * by org.example.a.b which is followed by org.example.b, etc).
+ * \param zone_id The zone to look through.
+ * \return The previous name.
+ * \note This function must return previous name even in case
+ * the queried rname does not exist in the zone.
+ * \note This method must skip under-the-zone-cut data (glue data).
+ * This might be implemented by looking for NSEC records (as glue
+ * data don't have them) in the zone or in some other way.
+ *
+ * \throw DataSourceError if there's a problem with the database.
+ * \throw NotImplemented if this database doesn't support DNSSEC
+ * or there's no previous name for the queried one (the NSECs
+ * might be missing or the queried name is less than or equal to the
+ * apex of the zone).
+ */
+ virtual std::string findPreviousName(int zone_id,
+ const std::string& rname) const = 0;
};
/**
@@ -587,6 +618,12 @@ public:
const FindOptions options = FIND_DEFAULT);
/**
+ * \brief Implementation of ZoneFinder::findPreviousName method.
+ */
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const;
+
+ /**
* \brief The zone ID
*
* This function provides the stored zone ID as passed to the
@@ -609,54 +646,42 @@ public:
boost::shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
const isc::dns::Name origin_;
-
+ //
+ /// \brief Shortcut name for the result of getRRsets
+ typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
+ FoundRRsets;
+ /// \brief Just shortcut for set of types
+ typedef std::set<dns::RRType> WantedTypes;
/**
- * \brief Searches database for an RRset
+ * \brief Searches database for RRsets of one domain.
*
- * This method scans RRs of single domain specified by name and finds
- * RRset with given type or any of redirection RRsets that are
- * requested.
+ * This method scans RRs of single domain specified by name and
+ * extracts any RRsets found and requested by parameters.
*
- * This function is used internally by find(), because this part is
- * called multiple times with slightly different parameters.
+ * It is used internally by find(), because it is called multiple
+ * times (usually with different domains).
*
* \param name Which domain name should be scanned.
- * \param type The RRType which is requested. This can be NULL, in
- * which case the method will look for the redirections only.
- * \param want_cname If this is true, CNAME redirection may be returned
- * instead of the RRset with given type. If there's CNAME and
- * something else or the CNAME has multiple RRs, it throws
- * DataSourceError.
- * \param want_dname If this is true, DNAME redirection may be returned
- * instead. This is with type = NULL only and is not checked in
- * other circumstances. If the DNAME has multiple RRs, it throws
- * DataSourceError.
- * \param want_ns This allows redirection by NS to be returned. If
- * any other data is met as well, DataSourceError is thrown.
- * \param construct_name If set to non-NULL, the resulting RRset will
- * be constructed for this name instead of the queried one. This
- * is useful for wildcards.
- * \note It may happen that some of the above error conditions are not
- * detected in some circumstances. The goal here is not to validate
- * the domain in DB, but to avoid bad behaviour resulting from
- * broken data.
- * \return First part of the result tells if the domain contains any
- * RRs. This can be used to decide between NXDOMAIN and NXRRSET.
- * The second part is the RRset found (if any) with any relevant
- * signatures attached to it.
- * \todo This interface doesn't look very elegant. Any better idea
- * would be nice.
+ * \param types List of types the caller is interested in.
+ * \param check_ns If this is set to true, it checks that nothing lives
+ * together with an NS record (with a few exceptions, like RRSIG
+ * or NSEC). This check is meant for non-apex NS records.
+ * \param construct_name If this is NULL, the resulting RRsets have
+ * their name set to name. If it is not NULL, it overrides the name
+ * and uses this one (this can be used for wildcard synthesized
+ * records).
+ * \return A pair, where the first element indicates if the domain
+ * contains any RRs at all (not only the requested ones; it may happen
+ * that this is set to true but the second part is empty). The second
+ * part is a map from RRTypes to RRsets of the corresponding types.
+ * If an RRset is not present in the DB, its RRType is not in the map at
+ * all (so you'll not find a NULL pointer in the result).
+ * \throw DataSourceError If there's a low-level error with the
+ * database or the database contains bad data.
*/
- std::pair<bool, isc::dns::RRsetPtr> getRRset(const isc::dns::Name&
- name,
- const isc::dns::RRType*
- type,
- bool want_cname,
- bool want_dname,
- bool want_ns, const
- isc::dns::Name*
- construct_name = NULL);
-
+ FoundRRsets getRRsets(const std::string& name,
+ const WantedTypes& types, bool check_ns,
+ const std::string* construct_name = NULL);
/**
* \brief Checks if something lives below this domain.
*
@@ -666,6 +691,23 @@ public:
* \param name The domain to check.
*/
bool hasSubdomains(const std::string& name);
+
+ /**
+ * \brief Get the NSEC covering a name.
+ *
+ * This one calls findPreviousName on the given name and extracts the NSEC
+ * record at the resulting name. It handles various error cases. The method
+ * exists to share code that is needed in more than one place.
+ */
+ dns::RRsetPtr findNSECCover(const dns::Name& name);
+
+ /**
+ * \brief Convenience type shortcut.
+ *
+ * To find stuff in the result of getRRsets.
+ */
+ typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
+ FoundIterator;
};
/**
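To make the pair<bool, map> contract of getRRsets() concrete (the bool only says whether the node has any RRs at all; a type appears as a map key only if an RRset of that type was actually found), here is a small standalone sketch of the lookup idiom used by find() above. Since getRRsets() is a private member, the map is filled by hand; everything here is illustrative and assumes only libdns++ from this tree:

    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <dns/rrttl.h>
    #include <dns/rrset.h>
    #include <iostream>
    #include <map>
    #include <utility>

    using namespace isc::dns;

    typedef std::map<RRType, RRsetPtr> TypeMap;
    typedef std::pair<bool, TypeMap> FoundRRsets;

    int main() {
        // Pretend the database returned an NS RRset for this node.
        TypeMap types;
        types[RRType::NS()] =
            RRsetPtr(new RRset(Name("delegation.example.org"),
                               RRClass::IN(), RRType::NS(), RRTTL(3600)));
        const FoundRRsets found(true, types);

        // The idiom from find(): a type is present iff it is a key in the
        // map, so there is never a NULL pointer to check for.
        const TypeMap::const_iterator nsi(found.second.find(RRType::NS()));
        const TypeMap::const_iterator dni(found.second.find(RRType::DNAME()));
        std::cout << "node exists: " << found.first
                  << ", has NS: " << (nsi != found.second.end())
                  << ", has DNAME: " << (dni != found.second.end())
                  << std::endl;
        return (0);
    }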
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index efb88fd..04ad610 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -63,6 +63,11 @@ The maximum allowed number of items of the hotspot cache is set to the given
number. If there are too many, some of them will be dropped. The size of 0
means no limit.
+% DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+
% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
Debug information. The database data source is looking up records with the given
name and type in the database.
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index a29e902..4c9e53f 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -666,6 +666,12 @@ InMemoryZoneFinder::getFileName() const {
return (impl_->file_name_);
}
+isc::dns::Name
+InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const {
+ isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC "
+ "yet, can't find previous name");
+}
+
/// Implementation details for \c InMemoryClient hidden from the public
/// interface.
///
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 4a6641d..cf467a2 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -77,6 +77,12 @@ public:
isc::dns::RRsetList* target = NULL,
const FindOptions options = FIND_DEFAULT);
+ /// \brief Implementation of the ZoneFinder::findPreviousName method
+ ///
+ /// This one throws NotImplemented exception, as InMemory doesn't
+ /// support DNSSEC currently.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const;
+
/// \brief Inserts an rrset into the zone.
///
/// It puts another RRset into the zone.
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 8e9f511..3607227 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -51,7 +51,8 @@ enum StatementID {
ADD_RECORD = 7,
DEL_RECORD = 8,
ITERATE = 9,
- NUM_STATEMENTS = 10
+ FIND_PREVIOUS = 10,
+ NUM_STATEMENTS = 11
};
const char* const text_statements[NUM_STATEMENTS] = {
@@ -72,7 +73,15 @@ const char* const text_statements[NUM_STATEMENTS] = {
"DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
"AND rdtype=?3 AND rdata=?4",
"SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
- "WHERE zone_id = ?1 ORDER BY name, rdtype"
+ "WHERE zone_id = ?1 ORDER BY name, rdtype",
+ /*
+ * This one looks for previous name with NSEC record. It is done by
+ * using the reversed name. The NSEC is checked because we need to
+ * skip glue data, which don't have the NSEC.
+ */
+ "SELECT name FROM records " // FIND_PREVIOUS
+ "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+ "rname < $2 ORDER BY rname DESC LIMIT 1"
};
struct SQLite3Parameters {
@@ -395,6 +404,28 @@ SQLite3Accessor::getZone(const std::string& name) const {
return (std::pair<bool, int>(false, 0));
}
+namespace {
+
+// Conversion to plain char
+const char*
+convertToPlainChar(const unsigned char* ucp, sqlite3 *db) {
+ if (ucp == NULL) {
+ // The field can really be NULL, in which case we return an
+ // empty string, or sqlite may have run out of memory, in
+ // which case we raise an error
+ if (sqlite3_errcode(db) == SQLITE_NOMEM) {
+ isc_throw(DataSourceError,
+ "Sqlite3 backend encountered a memory allocation "
+ "error in sqlite3_column_text()");
+ } else {
+ return ("");
+ }
+ }
+ const void* p = ucp;
+ return (static_cast<const char*>(p));
+}
+
+}
class SQLite3Accessor::Context : public DatabaseAccessor::IteratorContext {
public:
// Construct an iterator for all records. When constructed this
@@ -472,7 +503,8 @@ private:
void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
data[column] = convertToPlainChar(sqlite3_column_text(statement_,
- column));
+ column),
+ accessor_->dbparameters_->db_);
}
void bindZoneId(const int zone_id) {
@@ -499,29 +531,6 @@ private:
statement_ = NULL;
}
- // This helper method converts from the unsigned char* type (used by
- // sqlite3) to char* (wanted by std::string). Technically these types
- // might not be directly convertable
- // In case sqlite3_column_text() returns NULL, we just make it an
- // empty string, unless it was caused by a memory error
- const char* convertToPlainChar(const unsigned char* ucp) {
- if (ucp == NULL) {
- // The field can really be NULL, in which case we return an
- // empty string, or sqlite may have run out of memory, in
- // which case we raise an error
- if (sqlite3_errcode(accessor_->dbparameters_->db_)
- == SQLITE_NOMEM) {
- isc_throw(DataSourceError,
- "Sqlite3 backend encountered a memory allocation "
- "error in sqlite3_column_text()");
- } else {
- return ("");
- }
- }
- const void* p = ucp;
- return (static_cast<const char*>(p));
- }
-
const IteratorType iterator_type_;
boost::shared_ptr<const SQLite3Accessor> accessor_;
sqlite3_stmt *statement_;
@@ -662,6 +671,50 @@ SQLite3Accessor::deleteRecordInZone(const string (&params)[DEL_PARAM_COUNT]) {
*dbparameters_, DEL_RECORD, params, "delete record from zone");
}
+std::string
+SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
+ const
+{
+ sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+ sqlite3_clear_bindings(dbparameters_->statements_[FIND_PREVIOUS]);
+
+ if (sqlite3_bind_int(dbparameters_->statements_[FIND_PREVIOUS], 1,
+ zone_id) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+ if (sqlite3_bind_text(dbparameters_->statements_[FIND_PREVIOUS], 2,
+ rname.c_str(), -1, SQLITE_STATIC) != SQLITE_OK) {
+ isc_throw(SQLite3Error, "Could not bind name " << rname <<
+ " to SQL statement (find previous): " <<
+ sqlite3_errmsg(dbparameters_->db_));
+ }
+
+ std::string result;
+ const int rc = sqlite3_step(dbparameters_->statements_[FIND_PREVIOUS]);
+ if (rc == SQLITE_ROW) {
+ // We found it
+ result = convertToPlainChar(sqlite3_column_text(dbparameters_->
+ statements_[FIND_PREVIOUS], 0), dbparameters_->db_);
+ }
+ sqlite3_reset(dbparameters_->statements_[FIND_PREVIOUS]);
+
+ if (rc == SQLITE_DONE) {
+ // No NSEC records here, this DB doesn't support DNSSEC or
+ // we asked before the apex
+ isc_throw(isc::NotImplemented, "The zone doesn't support DNSSEC or "
+ "query before apex");
+ }
+
+ if (rc != SQLITE_ROW && rc != SQLITE_DONE) {
+ // Some kind of error
+ isc_throw(SQLite3Error, "Could not get data for previous name");
+ }
+
+ return (result);
+}
+
namespace {
void
addError(ElementPtr errors, const std::string& error) {
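The FIND_PREVIOUS statement works because the reversed-name ("rname") column sorts in the same order as DNSSEC canonical order, so the previous name is simply the largest rname smaller than the queried one. A standalone sketch of the same query shape against a throw-away in-memory database (illustrative only; the table layout is simplified and error checking is omitted, unlike the real accessor):

    // Build with: g++ find_previous_demo.cc -lsqlite3
    #include <sqlite3.h>
    #include <cstdio>

    int main() {
        sqlite3* db;
        sqlite3_open(":memory:", &db);
        sqlite3_exec(db,
                     "CREATE TABLE records (zone_id INT, name TEXT, "
                     "rname TEXT, rdtype TEXT);"
                     "INSERT INTO records VALUES (1, 'a.example.org.', "
                     "'org.example.a.', 'NSEC');"
                     "INSERT INTO records VALUES (1, 'b.a.example.org.', "
                     "'org.example.a.b.', 'NSEC');"
                     "INSERT INTO records VALUES (1, 'b.example.org.', "
                     "'org.example.b.', 'NSEC');",
                     NULL, NULL, NULL);

        // Same shape as the FIND_PREVIOUS statement added above.
        sqlite3_stmt* stmt;
        sqlite3_prepare_v2(db,
                           "SELECT name FROM records WHERE zone_id=?1 AND "
                           "rdtype='NSEC' AND rname < ?2 "
                           "ORDER BY rname DESC LIMIT 1",
                           -1, &stmt, NULL);
        sqlite3_bind_int(stmt, 1, 1);
        // Previous name of aa.example.org., passed in its reversed form.
        sqlite3_bind_text(stmt, 2, "org.example.aa.", -1, SQLITE_STATIC);
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            // Prints "b.a.example.org." -- the immediate predecessor of
            // aa.example.org. in DNSSEC order.
            std::printf("%s\n", reinterpret_cast<const char*>(
                            sqlite3_column_text(stmt, 0)));
        }
        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return (0);
    }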
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 6a77a63..3286f3b 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -172,6 +172,10 @@ public:
/// "sqlite3_bind10.sqlite3".
virtual const std::string& getDBName() const { return (database_name_); }
+ /// \brief Concrete implementation of the pure virtual method
+ virtual std::string findPreviousName(int zone_id, const std::string& rname)
+ const;
+
private:
/// \brief Private database data
boost::scoped_ptr<SQLite3Parameters> dbparameters_;
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 4ed9f12..fe57185 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -48,9 +48,11 @@ const char* const TEST_RECORDS[][5] = {
{"www.example.org.", "A", "3600", "", "192.0.2.1"},
{"www.example.org.", "AAAA", "3600", "", "2001:db8::1"},
{"www.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+ {"www.example.org.", "NSEC", "3600", "", "www2.example.org. A AAAA NSEC RRSIG"},
+ {"www.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"www2.example.org.", "A", "3600", "", "192.0.2.1"},
- {"www2.example.org.","AAAA", "3600", "", "2001:db8::1"},
+ {"www2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
{"www2.example.org.", "A", "3600", "", "192.0.2.2"},
{"cname.example.org.", "CNAME", "3600", "", "www.example.org."},
@@ -125,6 +127,7 @@ const char* const TEST_RECORDS[][5] = {
{"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
{"delegation.example.org.", "NS", "3600", "",
"ns.delegation.example.org."},
+ {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
{"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
"20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
@@ -153,6 +156,9 @@ const char* const TEST_RECORDS[][5] = {
// doesn't break anything
{"example.org.", "NS", "3600", "", "ns.example.com."},
{"example.org.", "A", "3600", "", "192.0.2.1"},
+ {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ {"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
"20000201000000 12345 example.org. FAKEFAKEFAKE"},
@@ -162,11 +168,23 @@ const char* const TEST_RECORDS[][5] = {
// Something for wildcards
{"*.wild.example.org.", "A", "3600", "", "192.0.2.5"},
{"*.wild.example.org.", "RRSIG", "3600", "A", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+ {"*.wild.example.org.", "NSEC", "3600", "", "cancel.here.wild.example.org. A NSEC RRSIG"},
+ {"*.wild.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"cancel.here.wild.example.org.", "AAAA", "3600", "", "2001:db8::5"},
{"delegatedwild.example.org.", "NS", "3600", "", "ns.example.com."},
{"*.delegatedwild.example.org.", "A", "3600", "", "192.0.2.5"},
{"wild.*.foo.example.org.", "A", "3600", "", "192.0.2.5"},
{"wild.*.foo.*.bar.example.org.", "A", "3600", "", "192.0.2.5"},
+ {"bao.example.org.", "NSEC", "3600", "", "wild.*.foo.*.bar.example.org. NSEC"},
+ {"*.cnamewild.example.org.", "CNAME", "3600", "", "www.example.org."},
+ {"*.nswild.example.org.", "NS", "3600", "", "ns.example.com."},
+ // For NSEC empty non-terminal
+ {"l.example.org.", "NSEC", "3600", "", "empty.nonterminal.example.org. NSEC"},
+ {"empty.nonterminal.example.org.", "A", "3600", "", "192.0.2.1"},
+ // Invalid rdata
+ {"invalidrdata.example.org.", "A", "3600", "", "Bunch of nonsense"},
+ {"invalidrdata2.example.org.", "A", "3600", "", "192.0.2.1"},
+ {"invalidrdata2.example.org.", "RRSIG", "3600", "", "Nonsense"},
{NULL, NULL, NULL, NULL, NULL},
};
@@ -223,6 +241,10 @@ public:
"This database datasource can't be iterated");
}
+ virtual std::string findPreviousName(int, const std::string&) const {
+ isc_throw(isc::NotImplemented,
+ "This data source doesn't support DNSSEC");
+ }
private:
const std::string database_name_;
@@ -529,6 +551,38 @@ public:
return (latest_clone_);
}
+ virtual std::string findPreviousName(int id, const std::string& rname)
+ const
+ {
+ // Hardcoded for now, but we could compute it from the data
+ // Maybe do it when it is needed some time in future?
+ if (id == -1) {
+ isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+ } else if (id == 42) {
+ if (rname == "org.example.nonterminal.") {
+ return ("l.example.org.");
+ } else if (rname == "org.example.aa.") {
+ return ("example.org.");
+ } else if (rname == "org.example.www2." ||
+ rname == "org.example.www1.") {
+ return ("www.example.org.");
+ } else if (rname == "org.example.badnsec2.") {
+ return ("badnsec1.example.org.");
+ } else if (rname == "org.example.brokenname.") {
+ return ("brokenname...example.org.");
+ } else if (rname == "org.example.bar.*.") {
+ return ("bao.example.org.");
+ } else if (rname == "org.example.notimplnsec." ||
+ rname == "org.example.wild.here.") {
+ isc_throw(isc::NotImplemented, "Not implemented in this test");
+ } else {
+ isc_throw(isc::Unexpected, "Unexpected name");
+ }
+ } else {
+ isc_throw(isc::Unexpected, "Unknown zone ID");
+ }
+ }
+
private:
// The following member variables are storage and/or update work space
// of the test zone. The "master"s are the real objects that contain
@@ -797,7 +851,17 @@ public:
// The following two lines instantiate test cases with concrete accessor
// classes to be tested.
+// XXX: clang++ installed on our FreeBSD buildbot cannot complete compiling
+// this file, seemingly due to the size of the code. We'll consider a more
+// complete fix, but as a short-term workaround we'll reduce the
+// number of tested accessor classes (thus reducing the amount of code
+// to be compiled) for this particular environment.
+#if defined(__clang__) && defined(__FreeBSD__)
+typedef ::testing::Types<MockAccessor> TestAccessorTypes;
+#else
typedef ::testing::Types<MockAccessor, TestSQLite3Accessor> TestAccessorTypes;
+#endif
+
TYPED_TEST_CASE(DatabaseClientTest, TestAccessorTypes);
// In some cases the entire test fixture is for the mock accessor only.
@@ -967,21 +1031,25 @@ doFindTest(ZoneFinder& finder,
ZoneFinder::FindResult result =
finder.find(name, type, NULL, options);
ASSERT_EQ(expected_result, result.code) << name << " " << type;
- if (!expected_rdatas.empty()) {
+ if (!expected_rdatas.empty() && result.rrset) {
checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
name, finder.getClass(), expected_type, expected_ttl,
expected_rdatas);
- if (!expected_sig_rdatas.empty()) {
+ if (!expected_sig_rdatas.empty() && result.rrset->getRRsig()) {
checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
expected_name : name, finder.getClass(),
isc::dns::RRType::RRSIG(), expected_ttl,
expected_sig_rdatas);
- } else {
+ } else if (expected_sig_rdatas.empty()) {
EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+ } else {
+ ADD_FAILURE() << "Missing RRSIG";
}
- } else {
+ } else if (expected_rdatas.empty()) {
EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+ } else {
+ ADD_FAILURE() << "Missing result";
}
}
@@ -1422,21 +1490,21 @@ TYPED_TEST(DatabaseClientTest, wildcard) {
"FAKEFAKEFAKE");
doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
this->qtype_, this->qtype_, this->rrttl_,
- ZoneFinder::SUCCESS, this->expected_rdatas_,
+ ZoneFinder::WILDCARD, this->expected_rdatas_,
this->expected_sig_rdatas_);
doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
- this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+ this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::WILDCARD,
this->expected_rdatas_, this->expected_sig_rdatas_);
this->expected_rdatas_.clear();
this->expected_sig_rdatas_.clear();
doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
- this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
- this->expected_sig_rdatas_);
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
- this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
- this->expected_sig_rdatas_);
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
// Direct request for this wildcard
this->expected_rdatas_.push_back("192.0.2.5");
@@ -1532,11 +1600,146 @@ TYPED_TEST(DatabaseClientTest, wildcard) {
doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
this->expected_rdatas_, this->expected_sig_rdatas_);
+ // FIXME: What should be returned in this case? How does the
+ // DNSSEC logic handle it?
}
+
+ const char* negative_dnssec_names[] = {
+ "a.bar.example.org.",
+ "foo.baz.bar.example.org.",
+ "a.foo.bar.example.org.",
+ NULL
+ };
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("wild.*.foo.*.bar.example.org. NSEC");
+ this->expected_sig_rdatas_.clear();
+ for (const char** name(negative_dnssec_names); *name != NULL; ++ name) {
+ doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+ RRType::NSEC(), this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("bao.example.org."), ZoneFinder::FIND_DNSSEC);
+ }
+
+ // Some strange things in the wild node
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("www.example.org.");
+ this->expected_sig_rdatas_.clear();
+ doFindTest(*finder, isc::dns::Name("a.cnamewild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::CNAME(),
+ this->rrttl_, ZoneFinder::CNAME,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindTest(*finder, isc::dns::Name("a.nswild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NS(),
+ this->rrttl_, ZoneFinder::DELEGATION,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, NXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
+ // The domain exists, but doesn't have this RRType
+ // So we should get its NSEC
+ //
+ // The user will have to query us again to get the correct
+ // answer (eg. prove there's not an exact match)
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("cancel.here.wild.example.org. A NSEC "
+ "RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ // Note that the NSEC name should NOT be synthesized.
+ doFindTest(*finder, isc::dns::Name("a.wild.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
+ // The domain doesn't exist, so we must get the right NSEC
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(*finder, isc::dns::Name("www1.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("www.example.org."), ZoneFinder::FIND_DNSSEC);
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("acnamesig1.example.org. NS A NSEC RRSIG");
+ // This tests that it works correctly at the apex (there was a bug where
+ // a check for NS-only data would throw here).
+ doFindTest(*finder, isc::dns::Name("aa.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We can't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("notimplnsec.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, Name::ROOT_NAME(),
+ ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
+ // Same as NXDOMAIN_NSEC, but with empty non-terminal
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+ doFindTest(*finder, isc::dns::Name("nonterminal.example.org."),
+ isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+
+ // Check that if the DB doesn't support it, the exception from there
+ // is not propagated and it only does not include the NSEC
+ if (!this->is_mock_) {
+ return; // We can't make the real DB throw
+ }
+ EXPECT_NO_THROW(doFindTest(*finder,
+ isc::dns::Name("here.wild.example.org."),
+ isc::dns::RRType::TXT(),
+ isc::dns::RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->empty_rdatas_, this->empty_rdatas_,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
}
TYPED_TEST(DatabaseClientTest, getOrigin) {
- DataSourceClient::FindResult zone(this->client_->findZone(this->zname_));
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("example.org")));
ASSERT_EQ(result::SUCCESS, zone.code);
shared_ptr<DatabaseClient::Finder> finder(
dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
@@ -2142,4 +2345,66 @@ TYPED_TEST(DatabaseClientTest, compoundUpdate) {
ZoneFinder::SUCCESS, this->expected_rdatas_,
this->empty_rdatas_);
}
+
+TYPED_TEST(DatabaseClientTest, previous) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www2.example.org.")));
+ // Check a name that doesn't exist there
+ EXPECT_EQ(Name("www.example.org."),
+ finder->findPreviousName(Name("www1.example.org.")));
+ if (this->is_mock_) { // We can't really force the DB to throw
+ // Check it doesn't crash or anything if the underlying DB throws
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("bad.example.org")));
+ finder =
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder);
+
+ EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")),
+ isc::NotImplemented);
+ } else {
+ // No need to test this on mock one, because we test only that
+ // the exception gets through
+
+ // A name before the origin
+ EXPECT_THROW(finder->findPreviousName(Name("example.com")),
+ isc::NotImplemented);
+ }
+}
+
+TYPED_TEST(DatabaseClientTest, invalidRdata) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->find(Name("invalidrdata.example.org."), RRType::A()),
+ DataSourceError);
+ EXPECT_THROW(finder->find(Name("invalidrdata2.example.org."), RRType::A()),
+ DataSourceError);
+}
+
+TEST_F(MockDatabaseClientTest, missingNSEC) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ /*
+ * FIXME: For now, we can't really distinguish this bogus input
+ * from not-signed zone so we can't throw. But once we can,
+ * enable the original test.
+ */
+#if 0
+ EXPECT_THROW(finder->find(Name("badnsec2.example.org."), RRType::A(), NULL,
+ ZoneFinder::FIND_DNSSEC),
+ DataSourceError);
+#endif
+ doFindTest(*finder, Name("badnsec2.example.org."), RRType::A(),
+ RRType::A(), this->rrttl_, ZoneFinder::NXDOMAIN,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TEST_F(MockDatabaseClientTest, badName) {
+ shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+ EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")),
+ DataSourceError);
+}
+
}
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index a926935..2b854db 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -395,6 +395,14 @@ public:
};
/**
+ * \brief Check that findPreviousName throws as it should now.
+ */
+TEST_F(InMemoryZoneFinderTest, findPreviousName) {
+ EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")),
+ isc::NotImplemented);
+}
+
+/**
* \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor.
*
* Takes the created zone finder and checks its properties they are the same
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 8b423f8..3974977 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -351,6 +351,45 @@ TEST_F(SQLite3AccessorTest, getRecords) {
EXPECT_FALSE(context->getNext(columns));
}
+TEST_F(SQLite3AccessorTest, findPrevious) {
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.example.dns01x."));
+ // Largest name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "com.example.wwww"));
+ // Out of zone after the last name
+ EXPECT_EQ("www.example.com.",
+ accessor->findPreviousName(1, "org.example."));
+ // Case insensitive?
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS02."));
+ // A name that doesn't exist
+ EXPECT_EQ("dns01.example.com.",
+ accessor->findPreviousName(1, "com.exaMple.DNS01X."));
+ // The DB contains foo.bar.example.com., which would be in between
+ // these two names. However, that one does not have an NSEC record,
+ // which is how this database recognizes glue data, so it should
+ // be skipped.
+ EXPECT_EQ("example.com.",
+ accessor->findPreviousName(1, "com.example.cname-ext."));
+ // Throw when we are before the origin
+ EXPECT_THROW(accessor->findPreviousName(1, "com.example."),
+ isc::NotImplemented);
+ EXPECT_THROW(accessor->findPreviousName(1, "a.example."),
+ isc::NotImplemented);
+}
+
+TEST_F(SQLite3AccessorTest, findPreviousNoData) {
+ // This one doesn't hold any NSEC records, so it shouldn't work
+ // The underlying DB/data don't support DNSSEC, so it's not implemented
+ // (does it make sense? Or different exception here?)
+ EXPECT_THROW(accessor->findPreviousName(3, "com.example.sql2.www."),
+ isc::NotImplemented);
+}
+
// Test fixture for creating a db that automatically deletes it before start,
// and when done
class SQLite3Create : public ::testing::Test {
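The reversed-name strings used in these tests are exactly what isc::dns::Name::reverse() produces, which is also how DatabaseClient::Finder::findPreviousName() builds the argument it passes down to the accessor. A tiny standalone sketch (illustrative only, assuming libdns++ from this tree):

    #include <dns/name.h>
    #include <iostream>

    int main() {
        const isc::dns::Name name("dns02.example.com");
        // Prints "com.example.dns02." -- the label-reversed form in which
        // case-insensitive string order matches DNSSEC order.
        std::cout << name.reverse().toText() << std::endl;
        return (0);
    }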
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index bb4f435..c83b14b 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -54,13 +54,50 @@ public:
///
/// Note: the codes are tentative. We may need more, or we may find
/// some of them unnecessary as we implement more details.
+ ///
+ /// Some are synonyms of others in terms of RCODE returned to user.
+ /// But they help the logic to decide if it should ask for a NSEC
+ /// that covers something or not (for example, in case of NXRRSET,
+ /// the directly returned NSEC is sufficient, but with the wildcard one
+ /// we need to add another NSEC proving there's no exact match and that
+ /// this is actually the best wildcard we have). Data sources that don't
+ /// support DNSSEC don't need to distinguish them.
+ ///
+ /// In case of NXRRSET related results, the returned NSEC record
+ /// belongs to the domain which would provide the result if it
+ /// contained the correct type (in case of NXRRSET, it is the queried
+ /// domain, in case of WILDCARD_NXRRSET, it is the wildcard domain
+ /// that matched the query name). In case of an empty nonterminal,
+ /// an NSEC is provided for the interval where the empty nonterminal
+ /// lives. The end of the interval is the subdomain causing existence
+ /// of the empty nonterminal (if there's sub.x.example.com, and no record
+ /// in x.example.com, then x.example.com exists implicitly - is the empty
+ /// nonterminal and sub.x.example.com is the subdomain causing it).
+ ///
+ /// Examples: if zone "example.com" has the following record:
+ /// \code
+ /// a.b.example.com. NSEC c.example.com.
+ /// \endcode
+ /// a call to \c find() for "b.example.com." will result in NXRRSET,
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ /// Likewise, if zone "example.org" has the following record,
+ /// \code
+ /// x.*.example.org. NSEC a.example.org.
+ /// \endcode
+ /// a call to \c find() for "y.example.org" will result in
+ /// WILDCARD_NXRRSET (*.example.org is an empty nonterminal wildcard node),
+ /// and if the FIND_DNSSEC option is set this NSEC will be returned.
+ ///
+ /// In case of NXDOMAIN, the returned NSEC covers the queried domain.
enum Result {
SUCCESS, ///< An exact match is found.
DELEGATION, ///< The search encounters a zone cut.
NXDOMAIN, ///< There is no domain name that matches the search name
NXRRSET, ///< There is a matching name but no RRset of the search type
CNAME, ///< The search encounters and returns a CNAME RR
- DNAME ///< The search encounters and returns a DNAME RR
+ DNAME, ///< The search encounters and returns a DNAME RR
+ WILDCARD, ///< Success by wildcard match, for DNSSEC
+ WILDCARD_NXRRSET ///< NXRRSET on wildcard, for DNSSEC
};
/// A helper structure to represent the search result of \c find().
@@ -135,7 +172,7 @@ public:
//@}
///
- /// \name Search Method
+ /// \name Search Methods
///
//@{
/// Search the zone for a given pair of domain name and RR type.
@@ -167,8 +204,8 @@ public:
/// We should revisit the interface before we heavily rely on it.
///
/// The \c options parameter specifies customized behavior of the search.
- /// Their semantics is as follows:
- /// - \c GLUE_OK Allow search under a zone cut. By default the search
+ /// Their semantics are as follows (the options can be OR'ed together):
+ /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
/// will stop once it encounters a zone cut. If this option is specified
/// it remembers information about the highest zone cut and continues
/// the search until it finds an exact match for the given name or it
@@ -176,6 +213,9 @@ public:
/// RRsets for that name are searched just like the normal case;
/// otherwise, if the search has encountered a zone cut, \c DELEGATION
/// with the information of the highest zone cut will be returned.
+ /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+ /// returned with the answer. It is allowed for the data source to
+ /// include them even when not requested.
///
/// A derived version of this method may involve internal resource
/// allocation, especially for constructing the resulting RRset, and may
@@ -195,6 +235,31 @@ public:
isc::dns::RRsetList* target = NULL,
const FindOptions options
= FIND_DEFAULT) = 0;
+
+ /// \brief Get previous name in the zone
+ ///
+ /// Gets the previous name in the DNSSEC order. This can be used
+ /// to find the correct NSEC records for proving nonexistence
+ /// of domains.
+ ///
+ /// The concrete implementation might throw anything it thinks appropriate,
+ /// however it is recommended to stick to the ones listed here. The user
+ /// of this method should be able to handle any exceptions.
+ ///
+ /// This method does not include under-zone-cut data (glue data).
+ ///
+ /// \param query The name to look for the previous one of. The
+ /// queried name doesn't have to exist in the zone.
+ /// \return The preceding name
+ ///
+ /// \throw NotImplemented in case the data source backend doesn't support
+ /// DNSSEC or there is no previous name in the zone (NSEC records might be
+ /// missing in the DB, or the queried name is less than or equal to the apex).
+ /// \throw DataSourceError for low-level or internal datasource errors
+ /// (like broken connection to database, wrong data living there).
+ /// \throw std::bad_alloc For allocation errors.
+ virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+ const = 0;
//@}
};
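A minimal caller-side sketch of how the new result codes might be distinguished (illustrative only; the function and the messages it prints are assumptions layered on the ZoneFinder interface declared above, and a real finder would come from DataSourceClient::findZone()):

    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrtype.h>
    #include <cstddef>
    #include <iostream>

    using namespace isc::dns;
    using namespace isc::datasrc;

    void
    describeAnswer(ZoneFinder& finder, const Name& qname, const RRType& qtype) {
        // Ask for DNSSEC data (NSEC/RRSIG) to be included where available.
        const ZoneFinder::FindResult result(
            finder.find(qname, qtype, NULL, ZoneFinder::FIND_DNSSEC));
        switch (result.code) {
        case ZoneFinder::SUCCESS:
            std::cout << "exact answer" << std::endl;
            break;
        case ZoneFinder::WILDCARD:
            // Same RCODE as SUCCESS, but the caller also needs an NSEC
            // proving there was no exact match for qname.
            std::cout << "wildcard-synthesized answer" << std::endl;
            break;
        case ZoneFinder::NXRRSET:
            // With FIND_DNSSEC, result.rrset may carry the NSEC of qname.
            std::cout << "name exists, type does not" << std::endl;
            break;
        case ZoneFinder::WILDCARD_NXRRSET:
            // result.rrset may carry the NSEC of the matching wildcard node.
            std::cout << "wildcard matched, type does not exist" << std::endl;
            break;
        case ZoneFinder::NXDOMAIN:
            // result.rrset may carry the NSEC covering qname.
            std::cout << "no such name" << std::endl;
            break;
        default:
            std::cout << "delegation, CNAME or DNAME" << std::endl;
            break;
        }
    }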
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index b3c8da2..f3cd5df 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -133,7 +133,15 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
if classdir_mtime < getmtime('@srcdir@/rdata'):
classdir_mtime = getmtime('@srcdir@/rdata')
- for dir in list(os.listdir('@srcdir@/rdata')):
+ # Sort directories before iterating through them so that the directory
+ # list is processed in the same order on all systems. The resulting
+ # files should compile regardless of the order in which the components
+ # are included but... Having a fixed order for the directories should
+ # eliminate system-dependent problems. (Note that the directory names
+ # in BIND 10 are ASCII, so the order should be locale-independent.)
+ dirlist = os.listdir('@srcdir@/rdata')
+ dirlist.sort()
+ for dir in dirlist:
classdir = '@srcdir@/rdata' + os.sep + dir
m = re_typecode.match(dir)
if os.path.isdir(classdir) and (m != None or dir == 'generic'):
@@ -145,7 +153,12 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
class_code = m.group(2)
if not class_code in classcode2txt:
classcode2txt[class_code] = class_txt
- for file in list(os.listdir(classdir)):
+
+ # Same considerations as directories regarding sorted order
+ # also apply to files.
+ filelist = os.listdir(classdir)
+ filelist.sort()
+ for file in filelist:
file = classdir + os.sep + file
m = re_typecode.match(os.path.split(file)[1])
if m != None:
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index c5ba4e1..b3e9229 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -124,10 +124,12 @@ public:
void setOpcode(const Opcode& opcode);
void setRcode(const Rcode& rcode);
int parseQuestion(InputBuffer& buffer);
- int parseSection(const Message::Section section, InputBuffer& buffer);
+ int parseSection(const Message::Section section, InputBuffer& buffer,
+ Message::ParseOptions options);
void addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata);
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options);
void addEDNS(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
const RRTTL& ttl, const Rdata& rdata);
@@ -614,7 +616,7 @@ Message::parseHeader(InputBuffer& buffer) {
}
void
-Message::fromWire(InputBuffer& buffer) {
+Message::fromWire(InputBuffer& buffer, ParseOptions options) {
if (impl_->mode_ != Message::PARSE) {
isc_throw(InvalidMessageOperation,
"Message parse attempted in non parse mode");
@@ -626,11 +628,11 @@ Message::fromWire(InputBuffer& buffer) {
impl_->counts_[SECTION_QUESTION] = impl_->parseQuestion(buffer);
impl_->counts_[SECTION_ANSWER] =
- impl_->parseSection(SECTION_ANSWER, buffer);
+ impl_->parseSection(SECTION_ANSWER, buffer, options);
impl_->counts_[SECTION_AUTHORITY] =
- impl_->parseSection(SECTION_AUTHORITY, buffer);
+ impl_->parseSection(SECTION_AUTHORITY, buffer, options);
impl_->counts_[SECTION_ADDITIONAL] =
- impl_->parseSection(SECTION_ADDITIONAL, buffer);
+ impl_->parseSection(SECTION_ADDITIONAL, buffer, options);
}
int
@@ -706,7 +708,7 @@ struct MatchRR : public unary_function<RRsetPtr, bool> {
// is hardcoded here.
int
MessageImpl::parseSection(const Message::Section section,
- InputBuffer& buffer)
+ InputBuffer& buffer, Message::ParseOptions options)
{
assert(section < MessageImpl::NUM_SECTIONS);
@@ -738,7 +740,7 @@ MessageImpl::parseSection(const Message::Section section,
addTSIG(section, count, buffer, start_position, name, rrclass, ttl,
*rdata);
} else {
- addRR(section, name, rrclass, rrtype, ttl, rdata);
+ addRR(section, name, rrclass, rrtype, ttl, rdata, options);
++added;
}
}
@@ -749,19 +751,22 @@ MessageImpl::parseSection(const Message::Section section,
void
MessageImpl::addRR(Message::Section section, const Name& name,
const RRClass& rrclass, const RRType& rrtype,
- const RRTTL& ttl, ConstRdataPtr rdata)
+ const RRTTL& ttl, ConstRdataPtr rdata,
+ Message::ParseOptions options)
{
- vector<RRsetPtr>::iterator it =
- find_if(rrsets_[section].begin(), rrsets_[section].end(),
- MatchRR(name, rrtype, rrclass));
- if (it != rrsets_[section].end()) {
- (*it)->setTTL(min((*it)->getTTL(), ttl));
- (*it)->addRdata(rdata);
- } else {
- RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
- rrset->addRdata(rdata);
- rrsets_[section].push_back(rrset);
+ if ((options & Message::PRESERVE_ORDER) == 0) {
+ vector<RRsetPtr>::iterator it =
+ find_if(rrsets_[section].begin(), rrsets_[section].end(),
+ MatchRR(name, rrtype, rrclass));
+ if (it != rrsets_[section].end()) {
+ (*it)->setTTL(min((*it)->getTTL(), ttl));
+ (*it)->addRdata(rdata);
+ return;
+ }
}
+ RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
+ rrset->addRdata(rdata);
+ rrsets_[section].push_back(rrset);
}
void
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index 6a8bf9f..f286c67 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -581,11 +581,58 @@ public:
/// message
void toWire(AbstractMessageRenderer& renderer, TSIGContext& tsig_ctx);
+ /// Parse options.
+ ///
+ /// \c PRESERVE_ORDER is described in detail with \c fromWire(); note that it does not affect EDNS or TSIG.
+ ///
+ /// The option values are used as a parameter for \c fromWire().
+ /// These are values of a bitmask type. Bitwise operations can be
+ /// performed on these values to express compound options.
+ enum ParseOptions {
+ PARSE_DEFAULT = 0, ///< The default options
+ PRESERVE_ORDER = 1 ///< Preserve RR order and don't combine them
+ };
+
/// \brief Parse the header section of the \c Message.
void parseHeader(isc::util::InputBuffer& buffer);
- /// \brief Parse the \c Message.
- void fromWire(isc::util::InputBuffer& buffer);
+ /// \brief (Re)build a \c Message object from wire-format data.
+ ///
+ /// This method parses the given wire format data to build a
+ /// complete Message object. On success, the values of the header section
+ /// fields can be accessible via corresponding get methods, and the
+ /// question and following sections can be accessible via the
+ /// corresponding iterators. If the message contains an EDNS or TSIG,
+ /// they can be accessible via \c getEDNS() and \c getTSIGRecord(),
+ /// respectively.
+ ///
+ /// This \c Message must be in the \c PARSE mode.
+ ///
+ /// This method performs strict validation on the given message based
+ /// on the DNS protocol specifications. If the given message data is
+ /// invalid, this method throws an exception (see the exception list).
+ ///
+ /// By default, this method combines RRs of the same name, RR type and
+ /// RR class in a section into a single RRset, even if they are interleaved
+ /// with a different type of RR (though it would be a rare case in
+ /// practice). If the \c PRESERVE_ORDER option is specified, it handles
+ /// each RR separately, in the appearing order, and converts it to a
+ /// separate RRset (so this RRset should contain exactly one Rdata).
+ /// This mode will be necessary when the higher level protocol is
+ /// ordering conscious. For example, in AXFR and IXFR, the position of
+ /// the SOA RRs is crucial.
+ ///
+ /// \exception InvalidMessageOperation \c Message is in the RENDER mode
+ /// \exception DNSMessageFORMERR The given message data is syntactically invalid
+ /// \exception MessageTooShort The given data is shorter than a valid
+ /// header section
+ /// \exception std::bad_alloc Memory allocation failure
+ /// \exception Others \c Name, \c Rdata, and \c EDNS classes can also throw
+ ///
+ /// \param buffer An input buffer object that stores the wire data
+ /// \param options Parse options
+ void fromWire(isc::util::InputBuffer& buffer, ParseOptions options
+ = PARSE_DEFAULT);
///
/// \name Protocol constants
@@ -629,6 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Message& message);
}
#endif // __MESSAGE_H
-// Local Variables:
+// Local Variables:
// mode: c++
-// End:
+// End:
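For illustration, a minimal sketch of parsing with the new option (wire_data/wire_len stand for a received DNS message; only libdns++ from this tree is assumed):

    #include <dns/message.h>
    #include <util/buffer.h>
    #include <cstddef>

    using namespace isc::dns;
    using namespace isc::util;

    void
    parsePreservingOrder(const void* wire_data, size_t wire_len) {
        Message message(Message::PARSE);
        InputBuffer buffer(wire_data, wire_len);
        // Each RR stays a separate single-Rdata RRset in its original
        // order -- needed e.g. for AXFR/IXFR, where the position of the
        // SOA RRs matters.
        message.fromWire(buffer, Message::PRESERVE_ORDER);
    }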
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 4452e40..3b89358 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -39,6 +39,7 @@ pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
EXTRA_DIST = tsigerror_python_inc.cc
+EXTRA_DIST += message_python_inc.cc
# Python prefers .so, while some OSes (specifically MacOS) use a different
# suffix for dynamic objects. -module is necessary to work this around.
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index b40ab45..6de0925 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -39,6 +39,9 @@ using namespace isc::dns;
using namespace isc::dns::python;
using namespace isc::util;
+// Import pydoc text
+#include "message_python_inc.cc"
+
namespace {
class s_Message : public PyObject {
public:
@@ -75,7 +78,7 @@ PyObject* Message_makeResponse(s_Message* self);
PyObject* Message_toText(s_Message* self);
PyObject* Message_str(PyObject* self);
PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(s_Message* self, PyObject* args);
+PyObject* Message_fromWire(PyObject* const pyself, PyObject* args);
// This list contains the actual set of functions we have in
// python. Each entry has
@@ -157,14 +160,7 @@ PyMethodDef Message_methods[] = {
"If the given message is not in RENDER mode, an "
"InvalidMessageOperation is raised.\n"
},
- { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS,
- "Parses the given wire format to a Message object.\n"
- "The first argument is a Message to parse the data into.\n"
- "The second argument must implement the buffer interface.\n"
- "If the given message is not in PARSE mode, an "
- "InvalidMessageOperation is raised.\n"
- "Raises MessageTooShort, DNSMessageFORMERR or DNSMessageBADVERS "
- " if there is a problem parsing the message." },
+ { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS, Message_fromWire_doc },
{ NULL, NULL, 0, NULL }
};
@@ -646,30 +642,54 @@ Message_toWire(s_Message* self, PyObject* args) {
}
PyObject*
-Message_fromWire(s_Message* self, PyObject* args) {
+Message_fromWire(PyObject* const pyself, PyObject* args) {
+ s_Message* self = static_cast<s_Message*>(pyself);
const char* b;
Py_ssize_t len;
- if (!PyArg_ParseTuple(args, "y#", &b, &len)) {
- return (NULL);
- }
+ unsigned int options = Message::PARSE_DEFAULT;
+
+ if (PyArg_ParseTuple(args, "y#", &b, &len) ||
+ PyArg_ParseTuple(args, "y#I", &b, &len, &options)) {
+ // We need to clear the error in case the first call to ParseTuple
+ // fails.
+ PyErr_Clear();
- InputBuffer inbuf(b, len);
- try {
- self->cppobj->fromWire(inbuf);
- Py_RETURN_NONE;
- } catch (const InvalidMessageOperation& imo) {
- PyErr_SetString(po_InvalidMessageOperation, imo.what());
- return (NULL);
- } catch (const DNSMessageFORMERR& dmfe) {
- PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
- return (NULL);
- } catch (const DNSMessageBADVERS& dmfe) {
- PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
- return (NULL);
- } catch (const MessageTooShort& mts) {
- PyErr_SetString(po_MessageTooShort, mts.what());
- return (NULL);
+ InputBuffer inbuf(b, len);
+ try {
+ self->cppobj->fromWire(
+ inbuf, static_cast<Message::ParseOptions>(options));
+ Py_RETURN_NONE;
+ } catch (const InvalidMessageOperation& imo) {
+ PyErr_SetString(po_InvalidMessageOperation, imo.what());
+ return (NULL);
+ } catch (const DNSMessageFORMERR& dmfe) {
+ PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+ return (NULL);
+ } catch (const DNSMessageBADVERS& dmfe) {
+ PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
+ return (NULL);
+ } catch (const MessageTooShort& mts) {
+ PyErr_SetString(po_MessageTooShort, mts.what());
+ return (NULL);
+ } catch (const InvalidBufferPosition& ex) {
+ PyErr_SetString(po_DNSMessageFORMERR, ex.what());
+ return (NULL);
+ } catch (const exception& ex) {
+ const string ex_what =
+ "Error in Message.from_wire: " + string(ex.what());
+ PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in Message.from_wire");
+ return (NULL);
+ }
}
+
+ PyErr_SetString(PyExc_TypeError,
+ "from_wire() arguments must be a byte object and "
+ "(optional) parse options");
+ return (NULL);
}
} // end of unnamed namespace
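For illustration only (not part of the patch): with the reworked argument handling above, passing a wrong argument type now raises a TypeError carrying the message set at the end of Message_fromWire. A quick sketch, assuming pydnspp is importable:

    import pydnspp

    msg = pydnspp.Message(pydnspp.Message.PARSE)
    try:
        msg.from_wire("not bytes")   # wrong type; both ParseTuple attempts fail
    except TypeError as ex:
        print(ex)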
diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc
new file mode 100644
index 0000000..561c494
--- /dev/null
+++ b/src/lib/dns/python/message_python_inc.cc
@@ -0,0 +1,41 @@
+namespace {
+const char* const Message_fromWire_doc = "\
+from_wire(data, options=PARSE_DEFAULT)\n\
+\n\
+(Re)build a Message object from wire-format data.\n\
+\n\
+This method parses the given wire format data to build a complete\n\
+Message object. On success, the values of the header section fields\n\
+can be accessible via corresponding get methods, and the question and\n\
+following sections can be accessible via the corresponding iterators.\n\
+If the message contains an EDNS or TSIG, they can be accessible via\n\
+get_edns() and get_tsig_record(), respectively.\n\
+\n\
+This Message must be in the PARSE mode.\n\
+\n\
+This method performs strict validation on the given message based on\n\
+the DNS protocol specifications. If the given message data is invalid,\n\
+this method throws an exception (see the exception list).\n\
+\n\
+By default, this method combines RRs of the same name, RR type and RR\n\
+class in a section into a single RRset, even if they are interleaved\n\
+with a different type of RR (though it would be a rare case in\n\
+practice). If the PRESERVE_ORDER option is specified, it handles each\n\
+RR separately, in the order of appearance, and converts it to a separate\n\
+RRset (so each such RRset contains exactly one Rdata). This mode will\n\
+be necessary when the higher-level protocol is order-conscious. For\n\
+example, in AXFR and IXFR, the positions of the SOA RRs are crucial.\n\
+\n\
+Exceptions:\n\
+ InvalidMessageOperation Message is in the RENDER mode\n\
+ DNSMessageFORMERR The given message data is syntactically invalid\n\
+ MessageTooShort The given data is shorter than a valid header\n\
+ section\n\
+ Others Name, Rdata, and EDNS classes can also throw\n\
+\n\
+Parameters:\n\
+ data A byte object of the wire data\n\
+ options Parse options\n\
+\n\
+";
+} // unnamed namespace
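For readers skimming the patch, a minimal sketch of how the new optional argument is used from Python (not part of the patch; it assumes pydnspp is built and that wire holds a complete DNS response as a bytes object):

    from pydnspp import Message

    msg = Message(Message.PARSE)
    msg.from_wire(wire)                          # default: same name/type/class RRs are combined

    msg = Message(Message.PARSE)
    msg.from_wire(wire, Message.PRESERVE_ORDER)  # one RRset per RR, wire order kept
    for rrset in msg.get_section(Message.SECTION_ANSWER):
        print(rrset.get_type(), len(rrset.get_rdata()))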
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 830876c..0a7d8e5 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -89,64 +89,91 @@ initModulePart_Message(PyObject* mod) {
if (PyType_Ready(&message_type) < 0) {
return (false);
}
+ void* p = &message_type;
+ if (PyModule_AddObject(mod, "Message", static_cast<PyObject*>(p)) < 0) {
+ return (false);
+ }
Py_INCREF(&message_type);
- // Class variables
- // These are added to the tp_dict of the type object
- //
- addClassVariable(message_type, "PARSE",
- Py_BuildValue("I", Message::PARSE));
- addClassVariable(message_type, "RENDER",
- Py_BuildValue("I", Message::RENDER));
-
- addClassVariable(message_type, "HEADERFLAG_QR",
- Py_BuildValue("I", Message::HEADERFLAG_QR));
- addClassVariable(message_type, "HEADERFLAG_AA",
- Py_BuildValue("I", Message::HEADERFLAG_AA));
- addClassVariable(message_type, "HEADERFLAG_TC",
- Py_BuildValue("I", Message::HEADERFLAG_TC));
- addClassVariable(message_type, "HEADERFLAG_RD",
- Py_BuildValue("I", Message::HEADERFLAG_RD));
- addClassVariable(message_type, "HEADERFLAG_RA",
- Py_BuildValue("I", Message::HEADERFLAG_RA));
- addClassVariable(message_type, "HEADERFLAG_AD",
- Py_BuildValue("I", Message::HEADERFLAG_AD));
- addClassVariable(message_type, "HEADERFLAG_CD",
- Py_BuildValue("I", Message::HEADERFLAG_CD));
-
- addClassVariable(message_type, "SECTION_QUESTION",
- Py_BuildValue("I", Message::SECTION_QUESTION));
- addClassVariable(message_type, "SECTION_ANSWER",
- Py_BuildValue("I", Message::SECTION_ANSWER));
- addClassVariable(message_type, "SECTION_AUTHORITY",
- Py_BuildValue("I", Message::SECTION_AUTHORITY));
- addClassVariable(message_type, "SECTION_ADDITIONAL",
- Py_BuildValue("I", Message::SECTION_ADDITIONAL));
-
- addClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
- Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
-
- /* Class-specific exceptions */
- po_MessageTooShort = PyErr_NewException("pydnspp.MessageTooShort", NULL,
- NULL);
- PyModule_AddObject(mod, "MessageTooShort", po_MessageTooShort);
- po_InvalidMessageSection =
- PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageSection", po_InvalidMessageSection);
- po_InvalidMessageOperation =
- PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageOperation",
- po_InvalidMessageOperation);
- po_InvalidMessageUDPSize =
- PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
- PyModule_AddObject(mod, "InvalidMessageUDPSize", po_InvalidMessageUDPSize);
- po_DNSMessageBADVERS = PyErr_NewException("pydnspp.DNSMessageBADVERS",
- NULL, NULL);
- PyModule_AddObject(mod, "DNSMessageBADVERS", po_DNSMessageBADVERS);
-
- PyModule_AddObject(mod, "Message",
- reinterpret_cast<PyObject*>(&message_type));
-
+ try {
+ //
+ // Constant class variables
+ //
+
+ // Parse mode
+ installClassVariable(message_type, "PARSE",
+ Py_BuildValue("I", Message::PARSE));
+ installClassVariable(message_type, "RENDER",
+ Py_BuildValue("I", Message::RENDER));
+
+ // Parse options
+ installClassVariable(message_type, "PARSE_DEFAULT",
+ Py_BuildValue("I", Message::PARSE_DEFAULT));
+ installClassVariable(message_type, "PRESERVE_ORDER",
+ Py_BuildValue("I", Message::PRESERVE_ORDER));
+
+ // Header flags
+ installClassVariable(message_type, "HEADERFLAG_QR",
+ Py_BuildValue("I", Message::HEADERFLAG_QR));
+ installClassVariable(message_type, "HEADERFLAG_AA",
+ Py_BuildValue("I", Message::HEADERFLAG_AA));
+ installClassVariable(message_type, "HEADERFLAG_TC",
+ Py_BuildValue("I", Message::HEADERFLAG_TC));
+ installClassVariable(message_type, "HEADERFLAG_RD",
+ Py_BuildValue("I", Message::HEADERFLAG_RD));
+ installClassVariable(message_type, "HEADERFLAG_RA",
+ Py_BuildValue("I", Message::HEADERFLAG_RA));
+ installClassVariable(message_type, "HEADERFLAG_AD",
+ Py_BuildValue("I", Message::HEADERFLAG_AD));
+ installClassVariable(message_type, "HEADERFLAG_CD",
+ Py_BuildValue("I", Message::HEADERFLAG_CD));
+
+ // Sections
+ installClassVariable(message_type, "SECTION_QUESTION",
+ Py_BuildValue("I", Message::SECTION_QUESTION));
+ installClassVariable(message_type, "SECTION_ANSWER",
+ Py_BuildValue("I", Message::SECTION_ANSWER));
+ installClassVariable(message_type, "SECTION_AUTHORITY",
+ Py_BuildValue("I", Message::SECTION_AUTHORITY));
+ installClassVariable(message_type, "SECTION_ADDITIONAL",
+ Py_BuildValue("I", Message::SECTION_ADDITIONAL));
+
+ // Protocol constant
+ installClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
+ Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
+
+ /* Class-specific exceptions */
+ po_MessageTooShort =
+ PyErr_NewException("pydnspp.MessageTooShort", NULL, NULL);
+ PyObjectContainer(po_MessageTooShort).installToModule(
+ mod, "MessageTooShort");
+ po_InvalidMessageSection =
+ PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageSection).installToModule(
+ mod, "InvalidMessageSection");
+ po_InvalidMessageOperation =
+ PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageOperation).installToModule(
+ mod, "InvalidMessageOperation");
+ po_InvalidMessageUDPSize =
+ PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
+ PyObjectContainer(po_InvalidMessageUDPSize).installToModule(
+ mod, "InvalidMessageUDPSize");
+ po_DNSMessageBADVERS =
+ PyErr_NewException("pydnspp.DNSMessageBADVERS", NULL, NULL);
+ PyObjectContainer(po_DNSMessageBADVERS).installToModule(
+ mod, "DNSMessageBADVERS");
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Message initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Message initialization");
+ return (false);
+ }
return (true);
}
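The exception objects installed above remain reachable as module attributes, so Python-side error handling looks roughly like the following sketch (the truncated wire value is purely illustrative):

    import pydnspp

    msg = pydnspp.Message(pydnspp.Message.PARSE)
    try:
        msg.from_wire(wire[:10])   # shorter than the 12-byte header
    except pydnspp.MessageTooShort as ex:
        print("message too short:", ex)
    except pydnspp.DNSMessageFORMERR as ex:
        print("malformed message:", ex)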
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index c731253..8f2d732 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -29,9 +29,9 @@ if "TESTDATA_PATH" in os.environ:
else:
testdata_path = "../tests/testdata"
-def factoryFromFile(message, file):
+def factoryFromFile(message, file, parse_options=Message.PARSE_DEFAULT):
data = read_wire_data(file)
- message.from_wire(data)
+ message.from_wire(data, parse_options)
return data
# we don't have direct comparison for rrsets right now (should we?
@@ -466,6 +466,54 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual("192.0.2.2", rdata[1].to_text())
self.assertEqual(2, len(rdata))
+ def test_from_wire_short_buffer(self):
+ data = read_wire_data("message_fromWire22.wire")
+ self.assertRaises(DNSMessageFORMERR, self.p.from_wire, data[:-1])
+
+ def test_from_wire_combind_rrs(self):
+ factoryFromFile(self.p, "message_fromWire19.wire")
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ self.assertEqual(2, len(rrset.get_rdata()))
+
+ rrset = self.p.get_section(Message.SECTION_ANSWER)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ self.assertEqual(1, len(rrset.get_rdata()))
+
+ def check_preserve_rrs(self, message, section):
+ rrset = message.get_section(section)[0]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[1]
+ self.assertEqual(RRType("AAAA"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('2001:db8::1', rdata[0].to_text())
+
+ rrset = message.get_section(section)[2]
+ self.assertEqual(RRType("A"), rrset.get_type())
+ rdata = rrset.get_rdata()
+ self.assertEqual(1, len(rdata))
+ self.assertEqual('192.0.2.2', rdata[0].to_text())
+
+ def test_from_wire_preserve_answer(self):
+ factoryFromFile(self.p, "message_fromWire19.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ANSWER)
+
+ def test_from_wire_preserve_authority(self):
+ factoryFromFile(self.p, "message_fromWire20.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_AUTHORITY)
+
+ def test_from_wire_preserve_additional(self):
+ factoryFromFile(self.p, "message_fromWire21.wire",
+ Message.PRESERVE_ORDER)
+ self.check_preserve_rrs(self.p, Message.SECTION_ADDITIONAL)
+
def test_EDNS0ExtCode(self):
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 04a4dc4..4eb72bc 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -23,6 +23,7 @@
#include <util/encode/base64.h>
#include <dns/messagerenderer.h>
+#include <dns/name.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
#include <dns/tsigerror.h>
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
index dd7fa5f..6afc4de 100644
--- a/src/lib/dns/rdata/generic/afsdb_18.cc
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -26,6 +26,7 @@
#include <boost/lexical_cast.hpp>
using namespace std;
+using namespace isc::util;
using namespace isc::util::str;
// BEGIN_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
index 734fbc3..aa5272c 100644
--- a/src/lib/dns/rdata/generic/minfo_14.cc
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -24,6 +24,7 @@
using namespace std;
using namespace isc::dns;
+using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rp_17.cc b/src/lib/dns/rdata/generic/rp_17.cc
index b8b2ba2..781b55d 100644
--- a/src/lib/dns/rdata/generic/rp_17.cc
+++ b/src/lib/dns/rdata/generic/rp_17.cc
@@ -24,6 +24,7 @@
using namespace std;
using namespace isc::dns;
+using namespace isc::util;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index d9f08ee..e85f82c 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -18,6 +18,7 @@
#include <dns/messagerenderer.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rrtype.h>
using namespace std;
using namespace isc::util;
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index 6430626..f068791 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -118,16 +118,20 @@ protected:
vector<unsigned char> received_data;
vector<unsigned char> expected_data;
- void factoryFromFile(Message& message, const char* datafile);
+ void factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options =
+ Message::PARSE_DEFAULT);
};
void
-MessageTest::factoryFromFile(Message& message, const char* datafile) {
+MessageTest::factoryFromFile(Message& message, const char* datafile,
+ Message::ParseOptions options)
+{
received_data.clear();
UnitTestUtil::readWireData(datafile, received_data);
InputBuffer buffer(&received_data[0], received_data.size());
- message.fromWire(buffer);
+ message.fromWire(buffer, options);
}
TEST_F(MessageTest, headerFlag) {
@@ -175,7 +179,6 @@ TEST_F(MessageTest, headerFlag) {
EXPECT_THROW(message_parse.setHeaderFlag(Message::HEADERFLAG_QR),
InvalidMessageOperation);
}
-
TEST_F(MessageTest, getEDNS) {
EXPECT_FALSE(message_parse.getEDNS()); // by default EDNS isn't set
@@ -532,7 +535,46 @@ TEST_F(MessageTest, appendSection) {
}
+TEST_F(MessageTest, parseHeader) {
+ received_data.clear();
+ UnitTestUtil::readWireData("message_fromWire1", received_data);
+
+ // parseHeader() isn't allowed in the render mode.
+ InputBuffer buffer(&received_data[0], received_data.size());
+ EXPECT_THROW(message_render.parseHeader(buffer), InvalidMessageOperation);
+
+ message_parse.parseHeader(buffer);
+ EXPECT_EQ(0x1035, message_parse.getQid());
+ EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
+ EXPECT_EQ(Rcode::NOERROR(), message_parse.getRcode());
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_QR));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_AA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+ EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_RD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_RA));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_AD));
+ EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_CD));
+ EXPECT_EQ(1, message_parse.getRRCount(Message::SECTION_QUESTION));
+ EXPECT_EQ(2, message_parse.getRRCount(Message::SECTION_ANSWER));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_AUTHORITY));
+ EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_ADDITIONAL));
+
+ // Only the header part should have been examined.
+ EXPECT_EQ(12, buffer.getPosition()); // 12 = size of the header section
+ EXPECT_TRUE(message_parse.beginQuestion() == message_parse.endQuestion());
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ANSWER) ==
+ message_parse.endSection(Message::SECTION_ANSWER));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_AUTHORITY) ==
+ message_parse.endSection(Message::SECTION_AUTHORITY));
+ EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ADDITIONAL) ==
+ message_parse.endSection(Message::SECTION_ADDITIONAL));
+}
+
TEST_F(MessageTest, fromWire) {
+ // fromWire() isn't allowed in the render mode.
+ EXPECT_THROW(factoryFromFile(message_render, "message_fromWire1"),
+ InvalidMessageOperation);
+
factoryFromFile(message_parse, "message_fromWire1");
EXPECT_EQ(0x1035, message_parse.getQid());
EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
@@ -564,6 +606,87 @@ TEST_F(MessageTest, fromWire) {
EXPECT_TRUE(it->isLast());
}
+TEST_F(MessageTest, fromWireShortBuffer) {
+    // We trim a valid message (ending with an SOA RR) by one byte.
+ // fromWire() should throw an exception while parsing the trimmed RR.
+ UnitTestUtil::readWireData("message_fromWire22.wire", received_data);
+ InputBuffer buffer(&received_data[0], received_data.size() - 1);
+ EXPECT_THROW(message_parse.fromWire(buffer), InvalidBufferPosition);
+}
+
+TEST_F(MessageTest, fromWireCombineRRs) {
+ // This message contains 3 RRs in the answer section in the order of
+ // A, AAAA, A types. fromWire() should combine the two A RRs into a
+ // single RRset by default.
+ factoryFromFile(message_parse, "message_fromWire19.wire");
+
+ RRsetIterator it = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetIterator it_end = message_parse.endSection(Message::SECTION_ANSWER);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(2, (*it)->getRdataCount());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+}
+
+// A helper function for a test pattern commonly used in several tests below.
+void
+preserveRRCheck(const Message& message, Message::Section section) {
+ RRsetIterator it = message.beginSection(section);
+ RRsetIterator it_end = message.endSection(section);
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("2001:db8::1", (*it)->getRdataIterator()->getCurrent().toText());
+
+ ++it;
+ ASSERT_TRUE(it != it_end);
+ EXPECT_EQ(RRType::A(), (*it)->getType());
+ EXPECT_EQ(1, (*it)->getRdataCount());
+ EXPECT_EQ("192.0.2.2", (*it)->getRdataIterator()->getCurrent().toText());
+}
+
+TEST_F(MessageTest, fromWirePreserveAnswer) {
+ // Using the same data as the previous test, but specify the PRESERVE_ORDER
+ // option. The received order of RRs should be preserved, and each RR
+ // should be stored in a single RRset.
+ factoryFromFile(message_parse, "message_fromWire19.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve answer RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ANSWER);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAuthority) {
+ // Same for the previous test, but for the authority section.
+ factoryFromFile(message_parse, "message_fromWire20.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve authority RRs");
+ preserveRRCheck(message_parse, Message::SECTION_AUTHORITY);
+ }
+}
+
+TEST_F(MessageTest, fromWirePreserveAdditional) {
+ // Same for the previous test, but for the additional section.
+ factoryFromFile(message_parse, "message_fromWire21.wire",
+ Message::PRESERVE_ORDER);
+ {
+ SCOPED_TRACE("preserve additional RRs");
+ preserveRRCheck(message_parse, Message::SECTION_ADDITIONAL);
+ }
+}
+
TEST_F(MessageTest, EDNS0ExtRcode) {
// Extended Rcode = BADVERS
factoryFromFile(message_parse, "message_fromWire10.wire");
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 3aa4937..d8f0d1c 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -6,7 +6,9 @@ BUILT_SOURCES += message_fromWire10.wire message_fromWire11.wire
BUILT_SOURCES += message_fromWire12.wire message_fromWire13.wire
BUILT_SOURCES += message_fromWire14.wire message_fromWire15.wire
BUILT_SOURCES += message_fromWire16.wire message_fromWire17.wire
-BUILT_SOURCES += message_fromWire18.wire
+BUILT_SOURCES += message_fromWire18.wire message_fromWire19.wire
+BUILT_SOURCES += message_fromWire20.wire message_fromWire21.wire
+BUILT_SOURCES += message_fromWire22.wire
BUILT_SOURCES += message_toWire2.wire message_toWire3.wire
BUILT_SOURCES += message_toWire4.wire message_toWire5.wire
BUILT_SOURCES += message_toText1.wire message_toText2.wire
@@ -71,6 +73,8 @@ EXTRA_DIST += message_fromWire11.spec message_fromWire12.spec
EXTRA_DIST += message_fromWire13.spec message_fromWire14.spec
EXTRA_DIST += message_fromWire15.spec message_fromWire16.spec
EXTRA_DIST += message_fromWire17.spec message_fromWire18.spec
+EXTRA_DIST += message_fromWire19.spec message_fromWire20.spec
+EXTRA_DIST += message_fromWire21.spec message_fromWire22.spec
EXTRA_DIST += message_toWire1 message_toWire2.spec message_toWire3.spec
EXTRA_DIST += message_toWire4.spec message_toWire5.spec
EXTRA_DIST += message_toText1.txt message_toText1.spec
diff --git a/src/lib/dns/tests/testdata/message_fromWire19.spec b/src/lib/dns/tests/testdata/message_fromWire19.spec
new file mode 100644
index 0000000..8212dbf
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire19.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# answer section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+ancount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire20.spec b/src/lib/dns/tests/testdata/message_fromWire20.spec
new file mode 100644
index 0000000..91986e4
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire20.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# authority section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+nscount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire21.spec b/src/lib/dns/tests/testdata/message_fromWire21.spec
new file mode 100644
index 0000000..cd6aac9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire21.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# additional section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+arcount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire22.spec b/src/lib/dns/tests/testdata/message_fromWire22.spec
new file mode 100644
index 0000000..a52523b
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire22.spec
@@ -0,0 +1,14 @@
+#
+# A simple DNS message containing one SOA RR in the answer section. This is
+# intended to be trimmed to emulate a bogus message.
+#
+
+[custom]
+sections: header:question:soa
+[header]
+qr: 1
+ancount: 1
+[question]
+rrtype: SOA
+[soa]
+as_rr: True
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index 2b63da6..f90f7b6 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = datasrc cc config log net notify util testutils acl bind10
+SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
SUBDIRS += log_messages
python_PYTHON = __init__.py
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 8fcbf42..029f110 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,4 +1,7 @@
-import isc.datasrc
+# On some systems, it appears the dynamic linker gets
+# confused if the order is not right here
+# There is probably a solution for this, but for now:
+# order is important here!
import isc.cc
import isc.config
-#import isc.dns
+import isc.datasrc
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
index 5a193b8..e0a1895 100644
--- a/src/lib/python/isc/acl/tests/Makefile.am
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index 0cc12ff..df8ab30 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -9,7 +9,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index 2dc6a58..4c2acc0 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST += test_session.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 7b48f43..6670ee7 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST += unittest_fakesession.py
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 46fb661..07fb417 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -1,10 +1,44 @@
SUBDIRS = . tests
+# old data, should be removed in the near future once conversion is done
+pythondir = $(pyexecdir)/isc/datasrc
python_PYTHON = __init__.py master.py sqlite3_ds.py
-pythondir = $(pyexecdir)/isc/datasrc
+
+# new data
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(SQLITE_CFLAGS)
+
+python_LTLIBRARIES = datasrc.la
+datasrc_la_SOURCES = datasrc.cc datasrc.h
+datasrc_la_SOURCES += client_python.cc client_python.h
+datasrc_la_SOURCES += iterator_python.cc iterator_python.h
+datasrc_la_SOURCES += finder_python.cc finder_python.h
+datasrc_la_SOURCES += updater_python.cc updater_python.h
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+datasrc_la_SOURCES += ${top_srcdir}/src/lib/datasrc/sqlite3_accessor.cc
+
+datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
+datasrc_la_LDFLAGS += -module
+datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
+datasrc_la_LIBADD += $(PYTHON_LIB)
+#datasrc_la_LIBADD += $(SQLITE_LIBS)
+
+EXTRA_DIST = client_inc.cc
+EXTRA_DIST += finder_inc.cc
+EXTRA_DIST += iterator_inc.cc
+EXTRA_DIST += updater_inc.cc
CLEANDIRS = __pycache__
clean-local:
rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0e1e481..0b4ed98 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,2 +1,21 @@
-from isc.datasrc.master import *
+import sys
+import os
+
+# this setup is a temporary workaround to deal with the problem of
+# having both 'normal' python modules and a wrapper module
+# Once all programs use the new interface, we should remove the
+# old, and the setup can be made similar to that of the log wrappers.
+intree = False
+for base in sys.path[:]:
+ datasrc_libdir = os.path.join(base, 'isc/datasrc/.libs')
+ if os.path.exists(datasrc_libdir):
+ sys.path.insert(0, datasrc_libdir)
+ intree = True
+
+if intree:
+ from datasrc import *
+else:
+ from isc.datasrc.datasrc import *
from isc.datasrc.sqlite3_ds import *
+from isc.datasrc.master import *
+
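With this __init__.py arrangement, applications keep a single import regardless of whether they run from the source tree or an installed location. A brief sketch (the database path is hypothetical):

    import isc.datasrc

    # new wrapper interface, provided by the datasrc extension module
    client = isc.datasrc.DataSourceClient("/tmp/zones.sqlite3")

    # the old helpers (isc.datasrc.master / isc.datasrc.sqlite3_ds) are
    # still re-exported for now, until all programs use the new interface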
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
new file mode 100644
index 0000000..1eba488
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -0,0 +1,157 @@
+namespace {
+
+const char* const DataSourceClient_doc = "\
+The base class of data source clients.\n\
+\n\
+This is the python wrapper for the abstract base class that defines\n\
+the common interface for various types of data source clients. A data\n\
+source client is a top level access point to a data source, allowing \n\
+various operations on the data source such as lookups, traversing or \n\
+updates. The client class itself has limited focus and delegates \n\
+the responsibility for these specific operations to other (c++) classes;\n\
+in general methods of this class act as factories of these other classes.\n\
+\n\
+- InMemoryClient: A client of a conceptual data source that stores all\n\
+ necessary data in memory for faster lookups\n\
+- DatabaseClient: A client that uses a real database backend (such as\n\
+ an SQL database). It would internally hold a connection to the\n\
+ underlying database system.\n\
+\n\
+It is intentional that the names of these derived classes don't\n\
+contain \"DataSource\", unlike their base class. It's also noteworthy\n\
+that the naming of the base class is somewhat redundant because the\n\
+namespace datasrc would indicate that it's related to a data source.\n\
+The redundant naming comes from the observation that namespaces are\n\
+often omitted with using directives, in which case \"Client\" would be\n\
+too generic. On the other hand, concrete derived classes are generally\n\
+not expected to be referenced directly from other modules and\n\
+applications, so we'll give them more concise names such as\n\
+InMemoryClient. A single DataSourceClient object is expected to handle\n\
+only a single RR class even if the underlying data source contains\n\
+records for multiple RR classes. Likewise, (when we support views) a\n\
+DataSourceClient object is expected to handle only a single view.\n\
+\n\
+If the application uses multiple threads, each thread will need to\n\
+create and use a separate DataSourceClient. This is because some\n\
+database backends don't allow multiple threads to share the same\n\
+connection to the database.\n\
+\n\
+For a client using an in-memory backend, this may result in having\n\
+multiple copies of the same data in memory, increasing the memory\n\
+footprint substantially. Depending on how multiple CPU cores are to be\n\
+supported for concurrent lookups on the same data source (which is\n\
+not fully decided yet, and for which multiple threads may be used), this\n\
+design may have to be revisited. This class (and therefore its derived\n\
+classes) is not copyable. This is because the derived classes would\n\
+generally contain attributes that are not easy to copy (such as a\n\
+large size of in memory data or a network connection to a database\n\
+server). To avoid surprising disruption from a naive copy, copying is\n\
+explicitly prohibited. For the expected usage of the client\n\
+classes the restriction should be acceptable.\n\
+\n\
+Todo: This class is still not complete. It will need more factory\n\
+methods, e.g. for (re)loading a zone.\n\
+";
+
+const char* const DataSourceClient_findZone_doc = "\
+find_zone(name) -> (code, ZoneFinder)\n\
+\n\
+Returns a ZoneFinder for a zone that best matches the given name.\n\
+\n\
+code: The result code of the operation (integer).\n\
+- DataSourceClient.SUCCESS: A zone that gives an exact match is found\n\
+- DataSourceClient.PARTIALMATCH: A zone whose origin is a super domain of name\n\
+ is found (but there is no exact match)\n\
+- DataSourceClient.NOTFOUND: For all other cases.\n\
+ZoneFinder: ZoneFinder object for the found zone if one is found;\n\
+otherwise None.\n\
+\n\
+Any internal error will be raised as an isc.datasrc.Error exception\n\
+\n\
+Parameters:\n\
+ name A domain name for which the search is performed.\n\
+\n\
+Return Value(s): A tuple containing a result value and a ZoneFinder object or\n\
+None\n\
+";
+
+const char* const DataSourceClient_getIterator_doc = "\
+get_iterator(name) -> ZoneIterator\n\
+\n\
+Returns an iterator to the given zone.\n\
+\n\
+This allows for traversing the whole zone. The returned object can\n\
+provide the RRsets one by one.\n\
+\n\
+This throws isc.datasrc.Error when the zone does not exist in the\n\
+datasource, or when an internal error occurs.\n\
+\n\
+The default implementation throws isc.datasrc.NotImplemented. This allows for\n\
+easy and fast deployment of minimal custom data sources, where the\n\
+user/implementer doesn't have to care about anything else but the\n\
+actual queries. Also, in some cases, it isn't possible to traverse the\n\
+zone from a logical point of view (e.g. dynamically generated zone data).\n\
+\n\
+It is not specified whether a concrete implementation of this method can\n\
+throw anything else.\n\
+\n\
+Parameters:\n\
+ isc.dns.Name The name of the zone apex to be traversed. It doesn't do\n\
+ a nearest match as find_zone does.\n\
+\n\
+Return Value(s): Pointer to the iterator.\n\
+";
+
+const char* const DataSourceClient_getUpdater_doc = "\
+get_updater(name, replace) -> ZoneUpdater\n\
+\n\
+Return an updater to make updates to a specific zone.\n\
+\n\
+The RR class of the zone is the one that the client is expected to\n\
+handle (see the detailed description of this class).\n\
+\n\
+If the specified zone is not found via the client, None will be\n\
+returned; in other words a completely new zone cannot be created\n\
+using an updater. It must be created beforehand (even if it's an empty\n\
+placeholder) in a way specific to the underlying data source.\n\
+\n\
+Conceptually, the updater will trigger a separate transaction for\n\
+subsequent updates to the zone within the context of the updater (the\n\
+actual implementation of the \"transaction\" may vary for the specific\n\
+underlying data source). Until commit() is performed on the updater,\n\
+the intermediate updates won't affect the results of other methods\n\
+(and the result of the object's methods created by other factory\n\
+methods). Likewise, if the updater is destructed without performing\n\
+commit(), the intermediate updates will be effectively canceled and\n\
+will never affect other methods.\n\
+\n\
+If the underlying data source allows concurrent updates, this method\n\
+can be called multiple times while the previously returned updater(s)\n\
+are still active. In this case each updater triggers a different\n\
+\"transaction\". Normally it would be for different zones for such a\n\
+case as handling multiple incoming AXFR streams concurrently, but this\n\
+interface does not even prohibit an attempt of getting more than one\n\
+updater for the same zone, as long as the underlying data source\n\
+allows such an operation (and any conflict resolution is left to the\n\
+specific implementation).\n\
+\n\
+If replace is true, any existing RRs of the zone will be deleted on\n\
+successful completion of updates (after commit() on the updater); if\n\
+it's false, the existing RRs will be intact unless explicitly deleted\n\
+by delete_rrset() on the updater.\n\
+\n\
+A data source can be \"read only\" or can prohibit partial updates. In\n\
+such cases this method will result in an isc.datasrc.NotImplemented exception\n\
+(unconditionally, or when replace is false).\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.NotImplemented The underlying data source does not support\n\
+ updates.\n\
+ isc.datasrc.Error Internal error in the underlying data source.\n\
+\n\
+Parameters:\n\
+ name The zone name to be updated\n\
+ replace Whether to delete existing RRs before making updates\n\
+\n\
+";
+} // unnamed namespace
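As a rough usage sketch matching the docstrings above (the database file name is hypothetical, error handling is omitted, and the constructor signature is the temporary sqlite3-only one introduced later in this patch):

    import isc.datasrc
    from pydnspp import Name

    client = isc.datasrc.DataSourceClient("/tmp/zones.sqlite3")

    code, finder = client.find_zone(Name("www.example.com"))
    if code == isc.datasrc.DataSourceClient.SUCCESS:
        print("exact zone match:", finder.get_origin().to_text())
    elif code == isc.datasrc.DataSourceClient.PARTIALMATCH:
        print("enclosing zone:", finder.get_origin().to_text())

    # get_iterator() returns a ZoneIterator for whole-zone traversal; its
    # Python-side interface is defined in iterator_python.cc (not shown here).
    iterator = client.get_iterator(Name("example.com"))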
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
new file mode 100644
index 0000000..984eabf
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -0,0 +1,264 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "client_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_DataSourceClient : public PyObject {
+public:
+ s_DataSourceClient() : cppobj(NULL) {};
+ DataSourceClient* cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_DataSourceClient, DataSourceClient>
+ DataSourceClientContainer;
+
+PyObject*
+DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
+ try {
+ DataSourceClient::FindResult find_result(
+ self->cppobj->findZone(PyName_ToName(name)));
+
+ result::Result r = find_result.code;
+ ZoneFinderPtr zfp = find_result.zone_finder;
+ // Use N instead of O so refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createZoneFinderObject(zfp)));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+ try {
+ return (createZoneIteratorObject(
+ self->cppobj->getIterator(PyName_ToName(name_obj))));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
+ s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+ PyObject *name_obj;
+ PyObject *replace_obj;
+ if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
+ PyBool_Check(replace_obj)) {
+ bool replace = (replace_obj != Py_False);
+ try {
+ return (createZoneUpdaterObject(
+ self->cppobj->getUpdater(PyName_ToName(name_obj),
+ replace)));
+ } catch (const isc::NotImplemented& ne) {
+ PyErr_SetString(getDataSourceException("NotImplemented"),
+ ne.what());
+ return (NULL);
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef DataSourceClient_methods[] = {
+ { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
+ METH_VARARGS, DataSourceClient_findZone_doc },
+ { "get_iterator",
+ reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+ DataSourceClient_getIterator_doc },
+ { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+ METH_VARARGS, DataSourceClient_getUpdater_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+int
+DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+ // TODO: we should use the factory function which hasn't been written
+ // yet. For now we hardcode the sqlite3 initialization, and pass it one
+ // string for the database file. (similar to how the 'old direct'
+ // sqlite3_ds code works)
+ try {
+ char* db_file_name;
+ if (PyArg_ParseTuple(args, "s", &db_file_name)) {
+ boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+ new SQLite3Accessor(db_file_name, isc::dns::RRClass::IN()));
+ self->cppobj = new DatabaseClient(isc::dns::RRClass::IN(),
+ sqlite3_accessor);
+ return (0);
+ } else {
+ return (-1);
+ }
+
+ } catch (const exception& ex) {
+ const string ex_what = "Failed to construct DataSourceClient object: " +
+ string(ex.what());
+ PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+ return (-1);
+ } catch (...) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Unexpected exception in constructing DataSourceClient");
+ return (-1);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "Invalid arguments to DataSourceClient constructor");
+
+ return (-1);
+}
+
+void
+DataSourceClient_destroy(s_DataSourceClient* const self) {
+ delete self->cppobj;
+ self->cppobj = NULL;
+ Py_TYPE(self)->tp_free(self);
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_DataSourceClient
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject datasourceclient_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.DataSourceClient",
+ sizeof(s_DataSourceClient), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ DataSourceClient_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ DataSourceClient_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
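A short sketch of constructing the client as wired up in DataSourceClient_init above (a single SQLite3 database file, RR class fixed to IN; the path is hypothetical):

    import isc.datasrc
    from pydnspp import Name

    client = isc.datasrc.DataSourceClient("/var/lib/bind10/zone.sqlite3")

    # replace=True means the zone's existing RRs are dropped once the
    # updater's commit() is called (see the get_updater docstring above)
    updater = client.get_updater(Name("example.org"), True)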
diff --git a/src/lib/python/isc/datasrc/client_python.h b/src/lib/python/isc/datasrc/client_python.h
new file mode 100644
index 0000000..b20fb6b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_CLIENT_H
+#define __PYTHON_DATASRC_CLIENT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject datasourceclient_type;
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
new file mode 100644
index 0000000..4b0324a
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -0,0 +1,225 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+
+#include <util/python/pycppwrapper_util.h>
+#include <dns/python/pydnspp_common.h>
+
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyObject*
+getDataSourceException(const char* ex_name) {
+ PyObject* ex_obj = NULL;
+
+ PyObject* datasrc_module = PyImport_AddModule("isc.datasrc");
+ if (datasrc_module != NULL) {
+ PyObject* datasrc_dict = PyModule_GetDict(datasrc_module);
+ if (datasrc_dict != NULL) {
+ ex_obj = PyDict_GetItemString(datasrc_dict, ex_name);
+ }
+ }
+
+ if (ex_obj == NULL) {
+ ex_obj = PyExc_RuntimeError;
+ }
+ return (ex_obj);
+}
+
+} // end namespace python
+} // end namespace datasrc
+} // end namespace isc
+
+namespace {
+
+bool
+initModulePart_DataSourceClient(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&datasourceclient_type) < 0) {
+ return (false);
+ }
+ void* dscp = &datasourceclient_type;
+ if (PyModule_AddObject(mod, "DataSourceClient", static_cast<PyObject*>(dscp)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&datasourceclient_type);
+
+ addClassVariable(datasourceclient_type, "SUCCESS",
+ Py_BuildValue("I", result::SUCCESS));
+ addClassVariable(datasourceclient_type, "EXIST",
+ Py_BuildValue("I", result::EXIST));
+ addClassVariable(datasourceclient_type, "NOTFOUND",
+ Py_BuildValue("I", result::NOTFOUND));
+ addClassVariable(datasourceclient_type, "PARTIALMATCH",
+ Py_BuildValue("I", result::PARTIALMATCH));
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneFinder(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zonefinder_type) < 0) {
+ return (false);
+ }
+ void* zip = &zonefinder_type;
+ if (PyModule_AddObject(mod, "ZoneFinder", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zonefinder_type);
+
+ addClassVariable(zonefinder_type, "SUCCESS",
+ Py_BuildValue("I", ZoneFinder::SUCCESS));
+ addClassVariable(zonefinder_type, "DELEGATION",
+ Py_BuildValue("I", ZoneFinder::DELEGATION));
+ addClassVariable(zonefinder_type, "NXDOMAIN",
+ Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+ addClassVariable(zonefinder_type, "NXRRSET",
+ Py_BuildValue("I", ZoneFinder::NXRRSET));
+ addClassVariable(zonefinder_type, "CNAME",
+ Py_BuildValue("I", ZoneFinder::CNAME));
+ addClassVariable(zonefinder_type, "DNAME",
+ Py_BuildValue("I", ZoneFinder::DNAME));
+
+ addClassVariable(zonefinder_type, "FIND_DEFAULT",
+ Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+ addClassVariable(zonefinder_type, "FIND_GLUE_OK",
+ Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+ addClassVariable(zonefinder_type, "FIND_DNSSEC",
+ Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneIterator(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneiterator_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneiterator_type;
+ if (PyModule_AddObject(mod, "ZoneIterator", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneiterator_type);
+
+ return (true);
+}
+
+bool
+initModulePart_ZoneUpdater(PyObject* mod) {
+ // We initialize the static description object with PyType_Ready(),
+ // then add it to the module. This is not just a check! (leaving
+ // this out results in segmentation faults)
+ if (PyType_Ready(&zoneupdater_type) < 0) {
+ return (false);
+ }
+ void* zip = &zoneupdater_type;
+ if (PyModule_AddObject(mod, "ZoneUpdater", static_cast<PyObject*>(zip)) < 0) {
+ return (false);
+ }
+ Py_INCREF(&zoneupdater_type);
+
+ return (true);
+}
+
+
+PyObject* po_DataSourceError;
+PyObject* po_NotImplemented;
+
+PyModuleDef iscDataSrc = {
+ { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+ "datasrc",
+ "Python bindings for the classes in the isc::datasrc namespace.\n\n"
+ "These bindings are close match to the C++ API, but they are not complete "
+ "(some parts are not needed) and some are done in more python-like ways.",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_datasrc(void) {
+ PyObject* mod = PyModule_Create(&iscDataSrc);
+ if (mod == NULL) {
+ return (NULL);
+ }
+
+ if (!initModulePart_DataSourceClient(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneFinder(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneIterator(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ if (!initModulePart_ZoneUpdater(mod)) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ try {
+ po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
+ NULL);
+ PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
+ NULL, NULL);
+ PyObjectContainer(po_NotImplemented).installToModule(mod,
+ "NotImplemented");
+ } catch (...) {
+ Py_DECREF(mod);
+ return (NULL);
+ }
+
+ return (mod);
+}
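The constants and exceptions registered above are used from Python roughly as follows (sketch only; the database path and names are illustrative):

    import isc.datasrc
    from pydnspp import Name

    client = isc.datasrc.DataSourceClient("/tmp/zones.sqlite3")
    code, finder = client.find_zone(Name("nonexistent.example"))
    if code == isc.datasrc.DataSourceClient.NOTFOUND:
        print("no zone covers that name")

    try:
        client.get_iterator(Name("nonexistent.example"))
    except isc.datasrc.Error as ex:
        print("datasrc error:", ex)
    except isc.datasrc.NotImplemented:
        print("backend does not support iteration")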
diff --git a/src/lib/python/isc/datasrc/datasrc.h b/src/lib/python/isc/datasrc/datasrc.h
new file mode 100644
index 0000000..d82881b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.h
@@ -0,0 +1,50 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_H
+#define __PYTHON_DATASRC_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.datasrc.datasrc loadable module.
+//
+// Since the datasrc module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access
+// C/C++ symbols defined in that module. So we get access to these objects
+// using the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.datasrc has been loaded by the time
+// this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed. Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage. As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getDataSourceException(const char* ex_name);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
+#endif // __PYTHON_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
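
On the Python side, the exception objects that getDataSourceException() looks
up are the ones installed on the module in PyInit_datasrc() above, so scripts
simply catch isc.datasrc.Error (or isc.datasrc.NotImplemented). A brief
sketch, assuming a built module; the database path is hypothetical:

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("/tmp/example.com.sqlite3")
    result, finder = client.find_zone(isc.dns.Name("example.com"))
    try:
        code, rrset = finder.find(isc.dns.Name("www.example.com"),
                                  isc.dns.RRType.A(), None,
                                  finder.FIND_DEFAULT)
    except isc.datasrc.Error as exc:
        # raised on internal data source errors (see the find() docs below)
        print("lookup failed:", exc)
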
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
new file mode 100644
index 0000000..2b47d02
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -0,0 +1,96 @@
+namespace {
+const char* const ZoneFinder_doc = "\
+The base class to search a zone for RRsets.\n\
+\n\
+The ZoneFinder class is a wrapper for the c++ base class for representing an\n\
+object that performs DNS lookups in a specific zone accessible via a\n\
+data source. In general, different types of data sources (in-memory,\n\
+database-based, etc) define their own derived c++ classes of ZoneFinder,\n\
+implementing ways to retrieve the required data through the common\n\
+interfaces declared in the base class. Each concrete ZoneFinder object\n\
+is therefore (conceptually) associated with a specific zone of one\n\
+specific data source instance.\n\
+\n\
+The origin name and the RR class of the associated zone are available\n\
+via the get_origin() and get_class() methods, respectively.\n\
+\n\
+The most important method of this class is find(), which performs the\n\
+lookup for a given domain and type. See the description of the method\n\
+for details.\n\
+\n\
+It's not clear whether we should request that a zone finder form a\n\
+\"transaction\", that is, whether to ensure the finder is not\n\
+susceptible to changes made by someone other than the creator of the\n\
+finder. If we don't request that, for example, two different lookup\n\
+results for the same name and type can be different if other threads\n\
+or programs make updates to the zone between the lookups. We should\n\
+revisit this point as we gain more experience.\n\
+\n\
+";
+
+const char* const ZoneFinder_getOrigin_doc = "\
+get_origin() -> isc.dns.Name\n\
+\n\
+Return the origin name of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_getClass_doc = "\
+get_class() -> isc.dns.RRClass\n\
+\n\
+Return the RR class of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_find_doc = "\
+find(name, type, target=None, options=FIND_DEFAULT) -> (code, rrset)\n\
+\n\
+Search the zone for a given pair of domain name and RR type.\n\
+\n\
+- If the search name belongs under a zone cut, it returns the code of\n\
+ DELEGATION and the NS RRset at the zone cut.\n\
+- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
+ if DNSSEC is requested, the NSEC RRset that proves the non-\n\
+ existence.\n\
+- If there is a matching name but no RRset of the search type, it\n\
+ returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
+ RRset for that name.\n\
+- If there is a CNAME RR of the searched name but there is no RR of\n\
+ the searched type of the name (so this type is different from\n\
+ CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
+ the searched RR type is CNAME, it is considered a successful match,\n\
+ and the code of SUCCESS will be returned.\n\
+- If the search name matches a delegation point of DNAME, it returns\n\
+ the code of DNAME and that DNAME RR.\n\
+- If the target is a list, all RRsets under the domain are inserted\n\
+  there and SUCCESS (or NXDOMAIN, in case of an empty domain) is\n\
+  returned instead of normal processing. This is intended to handle\n\
+  ANY queries. Note: this behavior is controversial, as discussed in\n\
+  https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html\n\
+  We should revisit the interface before we heavily rely on it.\n\
+  (This feature is disabled at this time.)\n\
+\n\
+The options parameter specifies customized behavior of the search.\n\
+Its semantics are as follows:\n\
+- GLUE_OK Allow search under a zone cut. By default the search will\n\
+ stop once it encounters a zone cut. If this option is specified it\n\
+ remembers information about the highest zone cut and continues the\n\
+ search until it finds an exact match for the given name or it\n\
+ detects there is no exact match. If an exact match is found, RRsets\n\
+ for that name are searched just like the normal case; otherwise, if\n\
+ the search has encountered a zone cut, DELEGATION with the\n\
+ information of the highest zone cut will be returned.\n\
+\n\
+This method raises an isc.datasrc.Error exception if there is an internal\n\
+error in the datasource.\n\
+\n\
+Parameters:\n\
+ name The domain name to be searched for.\n\
+ type The RR type to be searched for.\n\
+ target If target is not None, insert all RRs under the domain\n\
+ into it.\n\
+ options The search options.\n\
+\n\
+Return Value(s): A tuple of a result code and the RRset found (or None\n\
+if no matching RRset was found; see above).\n\
+";
+} // unnamed namespace
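
A short sketch of how the result codes described above are meant to be
consumed, mirroring the cases in datasrc_test.py later in this diff (the
database path is hypothetical):

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("/tmp/example.com.sqlite3")
    _, finder = client.find_zone(isc.dns.Name("example.com"))
    code, rrset = finder.find(isc.dns.Name("www.example.com"),
                              isc.dns.RRType.A(), None, finder.FIND_DEFAULT)
    if code == finder.SUCCESS:
        print(rrset.to_text())                # the matching RRset
    elif code == finder.DELEGATION:
        print("zone cut:", rrset.to_text())   # the NS RRset at the cut
    elif code in (finder.NXDOMAIN, finder.NXRRSET):
        print("no data; rrset is", rrset)     # None with FIND_DEFAULT
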
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
new file mode 100644
index 0000000..598d300
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -0,0 +1,248 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "finder_python.h"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// This is the shared code for the find() call in the finder and the updater
+// It is intentionally not available through any header, nor in our standard
+// namespace, as it is not supposed to be called anywhere but from finder and
+// updater
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
+ if (finder == NULL) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Internal error in find() wrapper; finder object NULL");
+ return (NULL);
+ }
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ finder->find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+ } else {
+ return (Py_BuildValue("IO", r, Py_None));
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+} // end namespace isc_datasrc_internal
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneFinder : public PyObject {
+public:
+ s_ZoneFinder() : cppobj(ZoneFinderPtr()) {};
+ ZoneFinderPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
+
+// General creation and destruction
+int
+ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneFinder cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneFinder_destroy(s_ZoneFinder* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneFinder_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneFinder_find(PyObject* po_self, PyObject* args) {
+ s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneFinder_methods[] = {
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneFinder_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneFinder_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneFinder_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+PyTypeObject zonefinder_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneFinder",
+ sizeof(s_ZoneFinder), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneFinder_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneFinder_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source) {
+ s_ZoneFinder* py_zi = static_cast<s_ZoneFinder*>(
+ zonefinder_type.tp_alloc(&zonefinder_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
new file mode 100644
index 0000000..5f2404e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -0,0 +1,36 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_FINDER_H
+#define __PYTHON_DATASRC_FINDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+
+namespace python {
+
+extern PyTypeObject zonefinder_type;
+
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
new file mode 100644
index 0000000..b1d9d25
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -0,0 +1,34 @@
+namespace {
+
+const char* const ZoneIterator_doc = "\
+Read-only iterator to a zone.\n\
+\n\
+You can get an instance of a ZoneIterator from the\n\
+DataSourceClient.get_iterator() method. The concrete c++\n\
+implementation will differ depending on the data source used; this is\n\
+the abstract interface.\n\
+\n\
+There is no way to restart the iteration from the beginning, or to go back.\n\
+\n\
+The ZoneIterator is a python iterator, and can be iterated over directly.\n\
+";
+
+const char* const ZoneIterator_getNextRRset_doc = "\
+get_next_rrset() -> isc.dns.RRset\n\
+\n\
+Get next RRset from the zone.\n\
+\n\
+This returns the next RRset in the zone.\n\
+\n\
+No particular order of the returned RRsets is guaranteed.\n\
+\n\
+While this can potentially raise anything (including standard\n\
+allocation errors), such cases should be rare.\n\
+\n\
+Returns the next RRset, or None when the iteration reaches the end\n\
+of the zone.\n\
+\n\
+Raises an isc.datasrc.Error exception if it is called again after\n\
+returning None.\n\
+";
+} // unnamed namespace
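
Because the binding also implements the Python iterator protocol (tp_iter and
tp_iternext in iterator_python.cc below), a zone can be walked either with
get_next_rrset() or with a plain for loop. A brief sketch, following the tests
in this diff (the database path is hypothetical):

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("/tmp/example.com.sqlite3")
    for rrset in client.get_iterator(isc.dns.Name("example.com")):
        print(rrset.to_text())
    # equivalently: call get_next_rrset() repeatedly until it returns None
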
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
new file mode 100644
index 0000000..b482ea6
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -0,0 +1,202 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "iterator_python.h"
+
+#include "iterator_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneIterator : public PyObject {
+public:
+ s_ZoneIterator() : cppobj(ZoneIteratorPtr()) {};
+ ZoneIteratorPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneIterator, ZoneIterator>
+ ZoneIteratorContainer;
+
+// General creation and destruction
+int
+ZoneIterator_init(s_ZoneIterator* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneIterator cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneIterator_destroy(s_ZoneIterator* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneIterator_getNextRRset(PyObject* po_self, PyObject*) {
+ s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+ if (!self->cppobj) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "get_next_rrset() called past end of iterator");
+ return (NULL);
+ }
+ try {
+ isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextRRset();
+ if (!rrset) {
+ Py_RETURN_NONE;
+ }
+ return (createRRsetObject(*rrset));
+ } catch (const isc::Exception& isce) {
+ // isc::Unexpected is thrown if getNextRRset() is called after the
+ // iteration has already reached the end ('iterating past end').
+ // We could also simply return None again.
+ PyErr_SetString(getDataSourceException("Error"), isce.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneIterator_iter(PyObject *self) {
+ Py_INCREF(self);
+ return (self);
+}
+
+PyObject*
+ZoneIterator_next(PyObject* self) {
+ PyObject *result = ZoneIterator_getNextRRset(self, NULL);
+ // iter_next must return NULL without error instead of Py_None
+ if (result == Py_None) {
+ Py_DECREF(result);
+ return (NULL);
+ } else {
+ return (result);
+ }
+}
+
+PyMethodDef ZoneIterator_methods[] = {
+ { "get_next_rrset",
+ reinterpret_cast<PyCFunction>(ZoneIterator_getNextRRset), METH_NOARGS,
+ ZoneIterator_getNextRRset_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneiterator_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneIterator",
+ sizeof(s_ZoneIterator), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneIterator_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneIterator_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ ZoneIterator_iter, // tp_iter
+ ZoneIterator_next, // tp_iternext
+ ZoneIterator_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneIterator_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source) {
+ s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
+ zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
new file mode 100644
index 0000000..b457740
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -0,0 +1,38 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_ITERATOR_H
+#define __PYTHON_DATASRC_ITERATOR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject zoneiterator_type;
+
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 1a50fd3..be30dfa 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,16 +1,18 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = master_test.py sqlite3_ds_test.py
+# old tests, TODO remove or change to use new API?
+#PYTESTS = master_test.py sqlite3_ds_test.py
+PYTESTS = datasrc_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
-CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
+CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +25,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
- PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
+ PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
TESTDATA_PATH=$(abs_srcdir)/testdata \
TESTDATA_WRITE_PATH=$(abs_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
new file mode 100644
index 0000000..15ceb80
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import isc.datasrc
+import isc.dns
+import unittest
+import os
+import shutil
+
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
+
+def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
+ rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
+ if rdatas is not None:
+ for rdata in rdatas:
+ rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
+ rrset_list.append(rrset_to_add)
+
+# helper function, we have no direct rrset comparison at the moment
+def rrsets_equal(a, b):
+ # no accessor for sigs either (so this only checks name, class, type, ttl,
+ # and rdata)
+ # also, because of the fake data in rrsigs, if the type is rrsig, the
+ # rdata is not checked
+ return a.get_name() == b.get_name() and\
+ a.get_class() == b.get_class() and\
+ a.get_type() == b.get_type() and \
+ a.get_ttl() == b.get_ttl() and\
+ (a.get_type() == isc.dns.RRType.RRSIG() or
+ sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# returns true if rrset is in expected_rrsets
+# will remove the rrset from expected_rrsets if found
+def check_for_rrset(expected_rrsets, rrset):
+ for cur_rrset in expected_rrsets[:]:
+ if rrsets_equal(cur_rrset, rrset):
+ expected_rrsets.remove(cur_rrset)
+ return True
+ return False
+
+class DataSrcClient(unittest.TestCase):
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+
+
+ def test_iterate(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ # for RRSIGs, the TTLs are currently modified. This test should
+ # start failing when we fix that.
+ rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+
+ # we do not know the order in which the RRsets are returned by the
+ # iterator, but we do want to check all of them, so we compare each
+ # one against a list of the RRsets we expect (see below)
+
+ # RRset is (atm) an unorderable type, and within an rrset, the
+ # rdatas and rrsigs may also be in random order. In theory the
+ # rrsets themselves can be returned in any order.
+ #
+ # So we create a second list with all rrsets we expect, and for each
+ # rrset we get from the iterator, see if it is in that list, and
+ # remove it.
+ #
+ # When the iterator is empty, we check no rrsets are left in the
+ # list of expected ones
+ expected_rrset_list = []
+
+ name = isc.dns.Name("sql1.example.com")
+ rrclass = isc.dns.RRClass.IN()
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ [
+ "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
+ "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
+ "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
+ "5fs0dE/xLztL/CzZ",
+ "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
+ "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
+ "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
+ "taX3lRRPLYWpxRcDPEjysXT3Lh0vfL5D+CIO1yKw/q7C+v6+/kYAxc2l"+
+ "fbNE3HpklSuF+dyX4nXxWgzbcFuLz5Bwfq6ZJ9RYe/kNkA0uMWNa1KkG"+
+ "eRh8gg22kgD/KT5hPTnpezUWLvoY5Qc7IB3T0y4n2JIwiF2ZrZYVrWgD"+
+ "jRWAzGsxJiJyjd6w2k0="
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ [
+ "dns01.example.com.",
+ "dns02.example.com.",
+ "dns03.example.com."
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
+ ])
+ # For RRSIGS, we can't add the fake data through the API, so we
+ # simply pass no rdata at all (which is skipped by the check later)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ [
+ "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ [
+ "192.0.2.100"
+ ])
+ name = isc.dns.Name("www.sql1.example.com.")
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ [
+ "sql1.example.com. A RRSIG NSEC"
+ ])
+ add_rrset(expected_rrset_list, name, rrclass,
+ isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+
+ # rrs is an iterator, but also has direct get_next_rrset(), use
+ # the latter one here
+ rrset_to_check = rrs.get_next_rrset()
+ while rrset_to_check is not None:
+ self.assertTrue(check_for_rrset(expected_rrset_list,
+ rrset_to_check),
+ "Unexpected rrset returned by iterator:\n" +
+ rrset_to_check.to_text())
+ rrset_to_check = rrs.get_next_rrset()
+
+ # Now check there are none left
+ self.assertEqual(0, len(expected_rrset_list),
+ "RRset(s) not returned by iterator: " +
+ str([rrset.to_text() for rrset in expected_rrset_list ]
+ ))
+
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ rrets = dsc.get_iterator(isc.dns.Name("example.com"))
+ # there are more than 80 RRs in this zone... let's just count the
+ # RRsets returned (we already did a full check of the smaller zone above)
+ self.assertEqual(55, len(list(rrets)))
+ # TODO should we catch this (iterating past end) and just return None
+ # instead of failing?
+ self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+ self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+
+ def test_construct_finder(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
+
+ def test_find(self):
+ dsc = isc.datasrc.DataSourceClient(READ_ZONE_DB_FILE)
+
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.DELEGATION, result)
+ self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns02.example.com.\n" +
+ "sql1.example.com. 3600 IN NS dns03.example.com.\n",
+ rrset.to_text())
+
+ result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.TXT(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXRRSET, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.CNAME, result)
+ self.assertEqual(
+ "cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
+ rrset.to_text())
+
+ self.assertRaises(TypeError, finder.find,
+ "foo",
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ "foo",
+ None,
+ finder.FIND_DEFAULT)
+ self.assertRaises(TypeError, finder.find,
+ isc.dns.Name("cname-ext.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ "foo")
+
+
+class DataSrcUpdater(unittest.TestCase):
+
+ def setUp(self):
+ # Make a fresh copy of the writable database with all original content
+ shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+ def test_construct(self):
+ # can't construct directly
+ self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+
+ def test_update_delete_commit(self):
+
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ updater.commit()
+ # second commit should raise exception
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ # the record should be gone now in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # now add it again
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ updater.add_rrset(rrset_to_delete)
+ updater.commit()
+
+ # second commit should throw
+ self.assertRaises(isc.datasrc.Error, updater.commit)
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ def test_update_delete_abort(self):
+ dsc = isc.datasrc.DataSourceClient(WRITE_ZONE_DB_FILE)
+
+ # first make sure, through a separate finder, that some record exists
+ result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual("example.com.", finder.get_origin().to_text())
+
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+ rrset_to_delete = rrset
+
+ # can't delete rrset with associated sig. Abuse that to force an
+ # exception first, then remove the sig, then delete the record
+ updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+ self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+ rrset_to_delete)
+
+ rrset_to_delete.remove_rrsig()
+
+ updater.delete_rrset(rrset_to_delete)
+
+ # The record should be gone in the updater, but not in the original
+ # finder (since we have not committed)
+ result, rrset = updater.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.NXDOMAIN, result)
+ self.assertEqual(None, rrset)
+
+ # destroy the updater, which should make it roll back
+ updater = None
+
+ # the record should still be available in the 'real' finder as well
+ result, rrset = finder.find(isc.dns.Name("www.example.com"),
+ isc.dns.RRType.A(),
+ None,
+ finder.FIND_DEFAULT)
+ self.assertEqual(finder.SUCCESS, result)
+ self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+ rrset.to_text())
+
+
+if __name__ == "__main__":
+ isc.log.init("bind10")
+ unittest.main()
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
new file mode 100644
index 0000000..32715ec
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -0,0 +1,181 @@
+namespace {
+
+const char* const ZoneUpdater_doc = "\
+The base class to make updates to a single zone.\n\
+\n\
+On construction, each derived class object will start a\n\
+\"transaction\" for making updates to a specific zone (this means a\n\
+constructor of a derived class would normally take parameters to\n\
+identify the zone to be updated). The underlying realization of a\n\
+\"transaction\" will differ for different derived classes; if it uses\n\
+a general purpose database as a backend, it will involve performing\n\
+some form of \"begin transaction\" statement for the database.\n\
+\n\
+Updates (adding or deleting RRs) are made via add_rrset() and\n\
+delete_rrset() methods. Until the commit() method is called the\n\
+changes are local to the updater object. For example, they won't be\n\
+visible via a ZoneFinder object, but only by the updater's own find()\n\
+method. The commit() completes the transaction and makes the changes\n\
+visible to others.\n\
+\n\
+This class does not provide an explicit \"rollback\" interface. If\n\
+something wrong or unexpected happens during the updates and the\n\
+caller wants to cancel the intermediate updates, the caller should\n\
+simply destroy the updater object without calling commit(). The\n\
+destructor is supposed to perform the \"rollback\" operation,\n\
+depending on the internal details of the derived class.\n\
+\n\
+This initial implementation provides a quite simple interface of\n\
+adding and deleting RRs (see the description of the related methods).\n\
+It may be revisited as we gain more experience.\n\
+\n\
+";
+
+const char* const ZoneUpdater_addRRset_doc = "\
+add_rrset(rrset) -> No return value\n\
+\n\
+Add an RRset to a zone via the updater.\n\
+It performs a few basic checks:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG, i.e., whether\n\
+ get_rrsig() on the RRset returns None.\n\
+\n\
+and performs no other validation. For example, it doesn't check\n\
+whether the owner name of the specified RRset is a subdomain of the\n\
+zone's origin; it doesn't care whether or not there is already an\n\
+RRset of the same name and RR type in the zone, and if there is,\n\
+whether any of the existing RRs have duplicate RDATA with the added\n\
+ones. If these conditions matter the calling application must examine\n\
+the existing data beforehand using the ZoneFinder returned by\n\
+get_finder().\n\
+\n\
+The validation requirement on the associated RRSIG is temporary. If we\n\
+find it more reasonable and useful to allow adding a pair of RRset and\n\
+its RRSIG RRset as we gain experiences with the interface, we may\n\
+remove this restriction. Until then we explicitly check it to prevent\n\
+accidental misuse.\n\
+\n\
+Conceptually, on successful call to this method, the zone will have\n\
+the specified RRset, and if there is already an RRset of the same name\n\
+and RR type, these two sets will be \"merged\". \"Merged\" means that\n\
+a subsequent call to ZoneFinder.find() for the name and type will\n\
+result in success and the returned RRset will contain all previously\n\
+existing and newly added RDATAs with the TTL being the minimum of the\n\
+two RRsets. The underlying representation of the \"merged\" RRsets may\n\
+vary depending on the characteristic of the underlying data source.\n\
+For example, if it uses a general purpose database that stores each RR\n\
+of the same RRset separately, it may simply be a larger sets of RRs\n\
+based on both the existing and added RRsets; the TTLs of the RRs may\n\
+be different within the database, and there may even be duplicate RRs\n\
+in different database rows. As long as the RRset returned via\n\
+ZoneFinder.find() conforms to the concept of \"merge\", the actual\n\
+internal representation is up to the implementation.\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must raise an isc.datasrc.Error\n\
+exception.\n\
+\n\
+Todo As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if there is already a\n\
+ duplicate RR (that has the same RDATA).\n\
+- we may want to check (and maybe reject) if there is already an RRset\n\
+ of the same name and RR type with different TTL\n\
+- we may even want to check if there is already any RRset of the same\n\
+ name and RR type.\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there's a duplicate, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error, or wrapper error\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be added\n\
+\n\
+";
+
+const char* const ZoneUpdater_deleteRRset_doc = "\
+delete_rrset(rrset) -> No return value\n\
+\n\
+Delete an RRset from a zone via the updater.\n\
+\n\
+Like add_rrset(), the detailed semantics and behavior of this method\n\
+may have to be revisited in a future version. The following are based\n\
+on the initial implementation decisions.\n\
+\n\
+- Existing RRs that don't match any of the specified RDATAs will\n\
+ remain in the zone.\n\
+- Any RRs of the specified RRset that don't exist in the zone will\n\
+ simply be ignored; the implementation of this method is not supposed\n\
+ to check that condition.\n\
+- The TTL of the RRset is ignored; matching is only performed by the\n\
+ owner name, RR type and RDATA\n\
+\n\
+Ignoring the TTL may not look sensible, but it's based on the\n\
+observation that it leads to more intuitive results, especially\n\
+when the underlying data source is a general purpose database. See\n\
+also the c++ documentation of DatabaseAccessor::DeleteRecordInZone()\n\
+on this point. It also matches the dynamic update protocol (RFC2136),\n\
+where TTLs are ignored when deleting RRs.\n\
+\n\
+This method performs a limited level of validation on the specified\n\
+RRset:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit(), the implementation must raise an isc.datasrc.Error\n\
+exception.\n\
+\n\
+Todo: As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if some or all of the RRs\n\
+ for the specified RRset don't exist in the zone\n\
+- we may want to allow an option to \"delete everything\" for\n\
+ specified name and/or specified name + RR type.\n\
+- as mentioned above, we may want to include the TTL in matching the\n\
+ deleted RRs\n\
+- we may want to add an \"options\" parameter that can control the\n\
+ above points\n\
+- we may want to have this method return a value containing the\n\
+ information on whether there are any RRs that are specified but don't\n\
+ exist, the number of actually deleted RRs, etc.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+ internal data source error\n\
+ std.bad_alloc Resource allocation failure\n\
+\n\
+Parameters:\n\
+ rrset The RRset to be deleted\n\
+\n\
+";
+
+const char* const ZoneUpdater_commit_doc = "\
+commit() -> void\n\
+\n\
+Commit the updates made in the updater to the zone.\n\
+\n\
+This method completes the \"transaction\" started at the creation of\n\
+the updater. After successful completion of this method, the updates\n\
+will be visible outside the scope of the updater. The actual internal\n\
+behavior will differ for different derived classes. For a derived class\n\
+with a general purpose database as a backend, for example, this method\n\
+would perform a \"commit\" statement for the database.\n\
+\n\
+This operation can only be performed at most once. A duplicate call\n\
+must result in a isc.datasrc.Error exception.\n\
+\n\
+Exceptions:\n\
+ isc.datasrc.Error Duplicate call of the method, internal data source\n\
+ error, or wrapper error\n\
+\n\
+";
+} // unnamed namespace
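
Putting the methods documented above together, a typical update transaction
would look roughly like the following sketch (modelled on the DataSrcUpdater
tests later in this diff; the database path and record data are illustrative
only):

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("/tmp/rwtest.sqlite3.copied")
    updater = client.get_updater(isc.dns.Name("example.com"), True)  # flag as in the tests

    rrset = isc.dns.RRset(isc.dns.Name("www.example.com"),
                          isc.dns.RRClass.IN(), isc.dns.RRType.A(),
                          isc.dns.RRTTL(3600))
    rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
                                  isc.dns.RRClass.IN(), "192.0.2.1"))

    updater.add_rrset(rrset)   # visible only through updater.find() for now
    updater.commit()           # makes the change visible to other finders
    # dropping the updater without calling commit() would instead roll back
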
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
new file mode 100644
index 0000000..a9dc581
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -0,0 +1,318 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+
+#include "datasrc.h"
+#include "updater_python.h"
+
+#include "updater_inc.cc"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// See finder_python.cc
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+}
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneUpdater : public PyObject {
+public:
+ s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()) {};
+ ZoneUpdaterPtr cppobj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int
+ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+ // can't be called directly
+ PyErr_SetString(PyExc_TypeError,
+ "ZoneUpdater cannot be constructed directly");
+
+ return (-1);
+}
+
+void
+ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+ // cppobj is a shared ptr, but to make sure things are not destroyed in
+ // the wrong order, we reset it here.
+ self->cppobj.reset();
+ Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneUpdater_addRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->addRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_deleteRRset(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject* rrset_obj;
+ if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+ try {
+ self->cppobj->deleteRRset(PyRRset_ToRRset(rrset_obj));
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_commit(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ self->cppobj->commit();
+ Py_RETURN_NONE;
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getClass(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createRRClassObject(self->cppobj->getFinder().getClass()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_getOrigin(PyObject* po_self, PyObject*) {
+ s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+ try {
+ return (createNameObject(self->cppobj->getFinder().getOrigin()));
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+}
+
+PyObject*
+ZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ return (isc_datasrc_internal::ZoneFinder_helper(&self->cppobj->getFinder(),
+ args));
+}
+
+PyObject*
+AZoneUpdater_find(PyObject* po_self, PyObject* args) {
+ s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+ PyObject *name;
+ PyObject *rrtype;
+ PyObject *target;
+ int options_int;
+ if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+ &rrtype_type, &rrtype,
+ &target, &options_int)) {
+ try {
+ ZoneFinder::FindOptions options =
+ static_cast<ZoneFinder::FindOptions>(options_int);
+ ZoneFinder::FindResult find_result(
+ self->cppobj->getFinder().find(PyName_ToName(name),
+ PyRRType_ToRRType(rrtype),
+ NULL,
+ options
+ ));
+ ZoneFinder::Result r = find_result.code;
+ isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+ if (rrsp) {
+ // Use N instead of O so the refcount isn't increased twice
+ return Py_BuildValue("IN", r, createRRsetObject(*rrsp));
+ } else {
+ return Py_BuildValue("IO", r, Py_None);
+ }
+ } catch (const DataSourceError& dse) {
+ PyErr_SetString(getDataSourceException("Error"), dse.what());
+ return (NULL);
+ } catch (const std::exception& exc) {
+ PyErr_SetString(getDataSourceException("Error"), exc.what());
+ return (NULL);
+ } catch (...) {
+ PyErr_SetString(getDataSourceException("Error"),
+ "Unexpected exception");
+ return (NULL);
+ }
+ } else {
+ return (NULL);
+ }
+ return Py_BuildValue("I", 1);
+}
+
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneUpdater_methods[] = {
+ { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+ METH_VARARGS, ZoneUpdater_addRRset_doc },
+ { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+ METH_VARARGS, ZoneUpdater_deleteRRset_doc },
+ { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
+ ZoneUpdater_commit_doc },
+ // Instead of a getFinder, we implement the finder functionality directly.
+ // This is because ZoneFinder is non-copyable, and we should not create
+ // a ZoneFinder object from a reference only (which is what is returned
+ // by getFinder()).
+ { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+ METH_NOARGS, ZoneFinder_getOrigin_doc },
+ { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+ METH_NOARGS, ZoneFinder_getClass_doc },
+ { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
+ ZoneFinder_find_doc },
+ { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneupdater_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "datasrc.ZoneUpdater",
+ sizeof(s_ZoneUpdater), // tp_basicsize
+ 0, // tp_itemsize
+ reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+ NULL, // tp_print
+ NULL, // tp_getattr
+ NULL, // tp_setattr
+ NULL, // tp_reserved
+ NULL, // tp_repr
+ NULL, // tp_as_number
+ NULL, // tp_as_sequence
+ NULL, // tp_as_mapping
+ NULL, // tp_hash
+ NULL, // tp_call
+ NULL, // tp_str
+ NULL, // tp_getattro
+ NULL, // tp_setattro
+ NULL, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT, // tp_flags
+ ZoneUpdater_doc,
+ NULL, // tp_traverse
+ NULL, // tp_clear
+ NULL, // tp_richcompare
+ 0, // tp_weaklistoffset
+ NULL, // tp_iter
+ NULL, // tp_iternext
+ ZoneUpdater_methods, // tp_methods
+ NULL, // tp_members
+ NULL, // tp_getset
+ NULL, // tp_base
+ NULL, // tp_dict
+ NULL, // tp_descr_get
+ NULL, // tp_descr_set
+ 0, // tp_dictoffset
+ reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+ NULL, // tp_alloc
+ PyType_GenericNew, // tp_new
+ NULL, // tp_free
+ NULL, // tp_is_gc
+ NULL, // tp_bases
+ NULL, // tp_mro
+ NULL, // tp_cache
+ NULL, // tp_subclasses
+ NULL, // tp_weaklist
+ NULL, // tp_del
+ 0 // tp_version_tag
+};
+
+PyObject*
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source) {
+ s_ZoneUpdater* py_zi = static_cast<s_ZoneUpdater*>(
+ zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
+ if (py_zi != NULL) {
+ py_zi->cppobj = source;
+ }
+ return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
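For context, the method table above exposes add_rrset, delete_rrset, commit, get_origin, get_class and find on the new datasrc.ZoneUpdater type. A minimal usage sketch from the Python side might look like the following; the DataSourceClient constructor arguments, the get_updater() call and the isc.dns helper classes are assumptions for illustration only and are not defined in this diff:

    # Hypothetical sketch of driving the ZoneUpdater bindings added above.
    # Module and constructor details are assumed; only the updater methods
    # (add_rrset, delete_rrset, find, commit) come from the method table.
    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("sqlite3",
                                          '{"database_file": "zones.sqlite3"}')
    updater = client.get_updater(isc.dns.Name("example.org"), True)

    rrset = isc.dns.RRset(isc.dns.Name("www.example.org"),
                          isc.dns.RRClass.IN(), isc.dns.RRType.A(),
                          isc.dns.RRTTL(3600))
    rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.A(),
                                  isc.dns.RRClass.IN(), "192.0.2.1"))

    updater.add_rrset(rrset)                      # ZoneUpdater_addRRset
    result, found = updater.find(isc.dns.Name("www.example.org"),
                                 isc.dns.RRType.A(), None, 0)  # ZoneUpdater_find
    updater.commit()                              # ZoneUpdater_commit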
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
new file mode 100644
index 0000000..3886aa3
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -0,0 +1,39 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_UPDATER_H
+#define __PYTHON_DATASRC_UPDATER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+
+extern PyTypeObject zoneupdater_type;
+
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_UPDATER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
new file mode 100644
index 0000000..161c2a5
--- /dev/null
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -0,0 +1,7 @@
+python_PYTHON = __init__.py
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index aa12664..5bb6a94 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -185,7 +185,7 @@ init(PyObject*, PyObject* args) {
Py_RETURN_NONE;
}
-// This initialization is for unit tests. It allows message settings to be
+// This initialization is for unit tests. It allows message settings to
// be determined by a set of B10_xxx environment variables. (See the
// description of initLogger() for more details.) The function has been named
// resetUnitTestRootLogger() here as being more descriptive and
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 8fa3746..170eee6 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST = console.out check_output.sh $(PYTESTS_NOGEN)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 371df59..dd94946 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 2f4e060..00c2eee 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index db44c86..3b882b4 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
# required by loadable python modules.
LIBRARY_PATH_PLACEHOLDER =
if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
endif
# test using command-line arguments, so use check-local target instead of TESTS
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..49ef0f1 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
status=0
n=0
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
echo "I:Checking b10-auth is working by default ($n)"
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
@@ -40,8 +44,8 @@ echo 'Stats show
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# the server should have received 1 UDP and 1 TCP query (the TCP query was
# sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -73,8 +77,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -97,8 +101,8 @@ echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
# The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
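The reworked checks above drop the exact JSON match ("auth.queries.tcp": 1,) in favour of GNU grep word-boundary patterns (\< and \>), so the test tolerates formatting changes in the Stats output as long as the counter name and its value appear on the same line. Roughly, in Python regex terms with \b standing in for grep's word boundaries, the relaxed pattern behaves like the sketch below; the sample output line is made up:

    # Illustration of the relaxed statistics check; not part of the commit.
    import re

    sample = '    "auth.queries.tcp": 1,'   # hypothetical bindctl output line
    # grep "\<queries\.tcp\>.*\<1\>" corresponds roughly to:
    assert re.search(r'\bqueries\.tcp\b.*\b1\b', sample)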