BIND 10 trac615, updated. 170485efd4eb6cb6ba6af78d62be71b2354befd8 [trac615] Unification of test parsers
BIND 10 source code commits
bind10-changes at lists.isc.org
Fri Mar 11 11:44:53 UTC 2011
The branch, trac615 has been updated
via 170485efd4eb6cb6ba6af78d62be71b2354befd8 (commit)
via 5874ea28a5e9a3af38fc15c53bcf9d0690680e6c (commit)
via 4fd1b4c0f03f01b2fb085dae50455f6746b435c9 (commit)
via 691a4a2f4e82d52d313e67ddd9b1e5480226fefe (commit)
via 52247fe2ac1e071d95b3333d3c0cea73a0429399 (commit)
via 11889ab357ea40764683e4be03bcc27a4ab6bb04 (commit)
via 9efbe64fe3ff22bb5fba46de409ae058f199c8a7 (commit)
via 7e385db1cb3baae0bcc5461c17a07062816cc5a6 (commit)
via 385a9f81dd2d30a370b2f45a6ff0ec7d09dbea79 (commit)
via 420ec42bd913fb83da37b26b75faae49c7957c46 (commit)
via 1ecb969fc160370724527c3e2e1812463f2398ca (commit)
via 4c05048ba059b79efeab53498737abe94d37ee07 (commit)
via d6f579083977bb3a94ed1bf2ffcaa31da878e0bf (commit)
via cba9add1f69d78f1ace066e76136bcf6f0a0d3d9 (commit)
via f7d2e62980d949a74bcaf03f6079cb847d6f0f0f (commit)
via 0cd6c3812b7e7250be1381cf87140794c9fa3c42 (commit)
via bf0b8426e2d05c1fc30a2b29929531c430f8bcda (commit)
via a84eadd788ec9544c79b26a47f5ef34f6b34c359 (commit)
via d39787aa4305f91cfca004088c6f5ca2a3867622 (commit)
via ba92da6356b8d3a5b164102af9d8d22259efccc9 (commit)
via 7c38ee8fabb2f93c83495e45c58a37712da25e67 (commit)
via b1bc4d63d568d15f6b21a3c25a2848bea2553ff7 (commit)
via a0d91794e105108425429a6ab9eed196171955eb (commit)
via db57bdd2f3f73b7d4048fec614f0a3db73c8489a (commit)
via 08d197753711038ef73a50ebc4458df5a454f648 (commit)
via 87d58087b71b509d9798bd700b9ba968712e142d (commit)
via 827babb0d6caade8ca685e5f3e779c756f9ebe1a (commit)
via 290bbf2d7c93c202b22f1f4e2638ae01038bc983 (commit)
via 86d0ba9023b4f4a23cf203d3c9bf7c2f3668315e (commit)
via 1ac4ea1a6996dd39d15d4f6cbdf0a766e8d8d569 (commit)
via 002aa60f46e33079f4d9bc131115a127ea5a5130 (commit)
via 3a6535a46295133690647d8fb75819abb5107bac (commit)
via 58da05cd5f8709c8f4e1824c5479a0e3cd2b4371 (commit)
via bf8ba76aed284a9f1907b3edde1f546c9a4c0a3c (commit)
via 416fcf7eb6c7c0dc249bcc838d75504f070750a1 (commit)
via 64978870d1b6843e94268160f5e9c7b7dfbeb755 (commit)
via b4149e03fd8f722fe1f0fe567b34d530da005109 (commit)
via a447b8e583551311e487816e11a171fdb0e8951a (commit)
via e0863720a874d75923ea66adcfbf5b2948efb10a (commit)
via 03de120c1148775977a8cad2b0697e934e2a823a (commit)
via 241571157deb66f6766918b783379409e294e2f5 (commit)
via 65a77d8fde64d464c75917a1ab9b6b3f02640ca6 (commit)
via 9287cc8f31d6ed03cdcd95f6ca59b58ed6d695b6 (commit)
via 34eaa7c5e831bb170f0fe8792e40df88f66ed8b9 (commit)
via 0eefd8be74290fd9e72e3d25356c64b24c844816 (commit)
via 2be9594e5c6cac0e01a75a06d8df31d8cef98380 (commit)
via 17f4eab177a8d459024a5fcf2b43e6826c129891 (commit)
via de2d7124bd8a6bf3f2c0a6e661dc5b6b178765cc (commit)
via 5b825a61a8f083aa879da6aedc6717d42534d5b4 (commit)
via 1af89c43e91b613bf3d8ece42ec5d93c19a5b225 (commit)
via 933b35dcb9cec5113853c93994d69cac44a97f4b (commit)
via 71bcc9a5c413276bd90276c24ee28d59478a138e (commit)
via 6ac000df85625f5921e8895a1aafff5e4be3ba9c (commit)
via 7fef6a88c5ab2cedc5f2fa0003ae1beb7a380b9c (commit)
via b0c4e1599b68046bbd72455ce90433285b226ffe (commit)
via 56f0f77fe500b6cb41cda2a950b4349f0778561e (commit)
via e49c4b5a038981cea80bf81613efa2c538c8b3e0 (commit)
via 5222b51a047d8f2352bc9f92fd022baf1681ed81 (commit)
via 5ecd1322b677fa9808125986a65b28d619c79120 (commit)
via 78828990b58d0b791f94ed2441bf1267f81d58d3 (commit)
via ccd3fea75828ec4e0e2b20d8bba1d7b103164141 (commit)
via a663b567f5a57c19c033de049219e4a5eb6d936b (commit)
via 4cf5938f86b3c1a54beac417f6723505183ddf11 (commit)
via 977a6a742c821ed49fc7d977f917b47f23563bdb (commit)
via 137a6934a14cf0c5b5c065e910b8b364beb0973f (commit)
via b23ef1cf58b0754e23e550362e7a979d5f8624ef (commit)
via bdc63200a7e2adcde2b0ce44aaeb8477fb059d17 (commit)
via 2495b96e3a3adeaaca091d7bf2533c1a58971ee5 (commit)
via eb6441aca464980d00e3ff827cbf4195c5a7afc5 (commit)
via 5e2e3fa2759475313d69d3f9cf1cd339f3db8bc7 (commit)
via 5ff855e14dca375c24404eebc4573b77dba915d3 (commit)
via 117fa08fc7f8b7462190b75f99d7604c49f3b7c6 (commit)
via 058f40e75eb59aea1d3e18ffcdfdacc8386aafa3 (commit)
via 0b42ead35810e82a97561d71309283950ca9d5a3 (commit)
via ed6b07b60ae503e97e284eff351386ba24377981 (commit)
via 19197b5bc9f2955bd6a8ca48a2d04472ed696e81 (commit)
via fa64874fe02ff6ab8a782e0ede0d70ff2c95783e (commit)
via 44bf7654cb3ce85fe63fa47c7b39f48bb041d4de (commit)
via 1c8557996bdd428c9a3bf0935c30bf42fb167f35 (commit)
via dfbeea4eb3f414dcb66d2447e59216c23dade515 (commit)
via 54e61304131965c4a1d88c9151f8697dcbb3ce12 (commit)
via b9d341ecd382b44d72888899e3559d7d41baa203 (commit)
via 242cb68113bd4d2c0a3ea3b61cc56e143a42b38e (commit)
via 3ffa099e0969468d0f16f00c503b8936b895a2d4 (commit)
via e0d5584bdb3ca05b078693787c35d8f8f9228040 (commit)
via 4596c143eadfb9d67c5395bb6fa367470976d0cb (commit)
via f3bd834c8e9334004994c33d0798ed6b61a7a99b (commit)
via 860a2965d04f80fd0e90d3c5100332a8bb2ae9d8 (commit)
via c4f4d32eed0f4c26e4968336179a1e749e925aa7 (commit)
via b2d210a1ed486dc8c1083deb6f1b0bf2802e8d57 (commit)
via 3407184a20c367a0de02b41befb4db16971c9589 (commit)
via 6c1f3af003f02eee8952da8463d05732e667c986 (commit)
via fa83bb99518bd8c4dede832769fe4f7a6405ea62 (commit)
via 0ca10e6357d7a41bc308f90183848ce41582adf8 (commit)
via 25a5f4ec755bc09b54410fcdff22691283147f32 (commit)
via 2275e0d7afa15b69bd7511616a6ebae0b9de0d22 (commit)
via 301da7d26d41e64d87c0cf72727f3347aa61fb40 (commit)
via 26582526e20e56ea4547a447cfa93e122af0f0e1 (commit)
via 6934390c92855d4006b727e1c8c1c89688afeb5f (commit)
via 333edc6244dafdab42258b84a46237c6dcf936a0 (commit)
via 8af2ccd45c7f070d2aa168763815fe8a82808e79 (commit)
via 19f9f10fa5594bfe311688823fba5dd149e76a59 (commit)
via 5851a6cc19356b49fcc2500f5d40e7861874561e (commit)
via 8be2bf19228473f5d54f5015e893b44791a5c0c2 (commit)
via edfcbcaccbf4b8098f5cfc1bd1943fe4542b3306 (commit)
via 7396247046ccc046d711265100aa8d46be057626 (commit)
via 9c3f27137967b5f92c430baab6ab442258d9d692 (commit)
via 56df4f9ab31b300b83c26fbeb1a7cf7951f8338e (commit)
via 2298392e3ce9b5a470f4c0e3b2a22ba571f9b8ab (commit)
via 309d02a51ee22ff4c0226784ecd7bb4394d19542 (commit)
via ddd7b7c3dfdebae68e3742e175e4931f0a5f6c5e (commit)
via a2609d0762b9dfdf2750a1bc44ea33525c634f5b (commit)
via de130e6d9b6dec3586126f660123a905cc9a284a (commit)
via 53b5297513ee2320556dead019f268e79b49f77a (commit)
via eeacd1d5adeb27ee536fb04296cc20ffa898cdab (commit)
via 33cf7e5a182bc98cd168928fa8b5c775581fe90a (commit)
via a9211d7973042503353e61f0e16ea092c0f621bf (commit)
via 48211526895ce56de771e65a7e6d23892c639660 (commit)
via 87829d23b5d698611c94ee92f758558d8ad95468 (commit)
via ff55e63140852f4c7cf8a0404dde36442c45ff4a (commit)
via b723719cdad0da4db9b2099596c09f407dbdb135 (commit)
via 996e2593c867478eec8f2b3700aa52776528394b (commit)
via 5cde27560e4b8e3cd098daae572ca4d0124358de (commit)
via 1f682b8172e368d7609aa2f066b778989a7fdf7e (commit)
via 6ba7a9cf0d9bcaa52ab17652ef1e6e3ad78094a0 (commit)
via 70d71681b83dd3e0cb125e3f3e176d04e7461aae (commit)
via e55ec73b0bdcc2b7d6286c6a18886de194280947 (commit)
via 9ae710aa95a75d63e2a37ca7d79297f8ef308e6c (commit)
via 5017cbe7d18c752b80e8e8289b9ab0315f5915d0 (commit)
via 77027490d53b644e157fce2df59c5dbd496d1e1a (commit)
via be701769ab3ba884c550d22455ba09af3b725a07 (commit)
via f4b20fc54b74d03279245942900806a5e7b57d39 (commit)
via 682436e844799b2194eccc2ffdfb3fa3da527cea (commit)
via 8c7144c6aef7eb26215f045ee95551ed7a6fe307 (commit)
via 86355ec5ded2f1988dc466f8844360c78e8454a7 (commit)
via b13c2fd090114e2c0cfe164bae10cde145f509b4 (commit)
via d3b169adf97f9c5a4990400e54562d644016fdaf (commit)
via 7de2a6f3f96d466ed24f7d8b24708a30ee679729 (commit)
via 2e46317ec615067947983aa9ebcce371a6a8fd2f (commit)
via 7c419681601df9c3a453f0e46756dd751344b1a8 (commit)
via 24ef7a16425e696fbb794605a89ff8d7bdd7172c (commit)
via f0dd8824dc9bb06420b960f2842902ca2d63fba3 (commit)
via af01e66fde8c0055319c47d775b0af1408a3e18a (commit)
via d4af3712f3987069dffe6ed30919ce0b7bf84699 (commit)
via e321d8b344152b558cb42856deb5e8798f153d1b (commit)
via 74d131f92fbcd1fe21335a743dde205063b63d00 (commit)
via 8a18de79026a2620f82662d308c4f87e9a54faa3 (commit)
from dcaab4f4a892d1ab0e9e2c246282066891b2ffcb (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 170485efd4eb6cb6ba6af78d62be71b2354befd8
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Fri Mar 11 12:37:23 2011 +0100
[trac615] Unification of test parsers
Not directly related to this ticket, but master recently created a
private version of a library test class that this branch already has.
commit 5874ea28a5e9a3af38fc15c53bcf9d0690680e6c
Merge: 4fd1b4c0f03f01b2fb085dae50455f6746b435c9 11889ab357ea40764683e4be03bcc27a4ab6bb04
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Fri Mar 11 12:31:52 2011 +0100
Merge branch 'master' into work/configuration
Conflicts:
src/bin/bind10/bind10.py.in
src/bin/bind10/tests/bind10_test.py.in
commit 4fd1b4c0f03f01b2fb085dae50455f6746b435c9
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Fri Mar 11 12:03:20 2011 +0100
[trac615] Clarify man page
commit 691a4a2f4e82d52d313e67ddd9b1e5480226fefe
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Fri Mar 11 12:04:25 2011 +0100
[trac615] Code cleanups
- Less hardcoding of values
- Tests of missing argument values
- "args" instead of "opts" for argument values
- Some documentation updates
commit 52247fe2ac1e071d95b3333d3c0cea73a0429399
Author: Michal 'vorner' Vaner <michal.vaner at nic.cz>
Date: Fri Mar 11 12:01:50 2011 +0100
[trac615] Unify config param to -c
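
The net effect of this last commit is that the boss process takes the
configuration database with -c/--config-file, the same switch the other
tools use. As a rough illustration only (this is not the branch's actual
parse_args(), whose full definition is not part of this diff), the sketch
below reproduces the option interface that the new bind10 tests further
down exercise (test_config_filename, test_data_path, test_cmdctl_port):

    # Sketch of the unified command line; the real implementation lives
    # in src/bin/bind10/bind10.py.in.
    from optparse import OptionParser

    def parse_args(args):
        parser = OptionParser()
        parser.add_option("-c", "--config-file", dest="config_file",
                          default=None, help="configuration database to use")
        parser.add_option("-p", "--data-path", dest="data_path",
                          default=None, help="path searched for config files")
        parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
                          default=None, help="port of command control")
        options, _ = parser.parse_args(args)
        return options

    options = parse_args(["-c", "b10-config.db", "-p", "/var/bind10"])
    print(options.config_file)   # b10-config.db
    print(options.data_path)     # /var/bind10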
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 85 +++-
Makefile.am | 7 +-
README | 2 -
configure.ac | 16 +-
doc/guide/bind10-guide.xml | 15 +-
src/bin/auth/auth.spec.pre.in | 10 +-
src/bin/auth/b10-auth.8 | 62 +--
src/bin/auth/b10-auth.xml | 11 +-
src/bin/auth/benchmarks/query_bench.cc | 2 +-
src/bin/auth/main.cc | 8 +-
src/bin/auth/tests/auth_srv_unittest.cc | 2 +-
src/bin/bind10/bind10.8 | 37 +-
src/bin/bind10/bind10.py.in | 58 ++-
src/bin/bind10/bind10.xml | 9 +-
src/bin/bind10/tests/bind10_test.py | 460 --------------
src/bin/bind10/tests/bind10_test.py.in | 515 ++++++++++++++++
src/bin/bindctl/Makefile.am | 9 +-
src/bin/bindctl/bindcmd.py | 17 +-
src/bin/bindctl/bindctl-source.py.in | 135 ----
src/bin/bindctl/bindctl.xml | 21 +-
src/bin/bindctl/bindctl_main.py.in | 138 +++++
src/bin/bindctl/tests/Makefile.am | 2 +-
src/bin/bindctl/tests/bindctl_test.py | 48 ++-
src/bin/cfgmgr/b10-cfgmgr.py.in | 22 +-
src/bin/cfgmgr/b10-cfgmgr.xml | 6 +-
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in | 20 +-
src/bin/resolver/b10-resolver.8 | 2 +-
src/bin/resolver/b10-resolver.xml | 5 +-
src/bin/resolver/main.cc | 1 -
src/bin/resolver/resolver.cc | 34 +-
src/bin/resolver/resolver.spec.pre.in | 6 +-
src/bin/resolver/tests/resolver_unittest.cc | 21 +
src/bin/xfrout/xfrout.py.in | 6 +-
src/cppcheck-suppress.lst | 2 +-
src/lib/asiolink/Makefile.am | 29 +-
src/lib/asiolink/asiodef.cc | 37 ++
src/lib/asiolink/asiodef.h | 21 +
src/lib/asiolink/asiodef.msg | 56 ++
src/lib/asiolink/asiolink.h | 4 -
src/lib/asiolink/asiolink_utilities.h | 61 ++
src/lib/asiolink/dns_server.h | 10 +-
src/lib/asiolink/dns_service.h | 2 +-
src/lib/asiolink/dummy_io_cb.h | 8 +
src/lib/asiolink/internal/iofetch.h | 125 ----
src/lib/asiolink/interval_timer.h | 2 +-
src/lib/asiolink/io_address.h | 4 -
src/lib/asiolink/io_asio_socket.h | 214 +++++--
src/lib/asiolink/io_endpoint.cc | 1 +
src/lib/asiolink/io_endpoint.h | 4 -
src/lib/asiolink/io_fetch.cc | 260 +++++++--
src/lib/asiolink/io_fetch.h | 147 ++---
src/lib/asiolink/io_message.h | 4 -
src/lib/asiolink/qid_gen.cc | 54 ++
src/lib/asiolink/qid_gen.h | 85 +++
src/lib/asiolink/recursive_query.cc | 70 ++-
src/lib/asiolink/recursive_query.h | 14 +
src/lib/asiolink/tcp_endpoint.h | 61 ++-
src/lib/asiolink/tcp_server.cc | 24 +-
src/lib/asiolink/tcp_socket.h | 303 +++++++---
src/lib/asiolink/tests/Makefile.am | 12 +-
.../asiolink/tests/asiolink_utilities_unittest.cc | 74 +++
src/lib/asiolink/tests/io_endpoint_unittest.cc | 6 +-
src/lib/asiolink/tests/io_fetch_unittest.cc | 601 ++++++++++++++++---
src/lib/asiolink/tests/io_service_unittest.cc | 2 +-
src/lib/asiolink/tests/qid_gen_unittest.cc | 59 ++
.../asiolink/tests/recursive_query_unittest_2.cc | 642 ++++++++++++++++++++
src/lib/asiolink/tests/run_unittests.cc | 6 +-
src/lib/asiolink/tests/tcp_endpoint_unittest.cc | 55 ++
src/lib/asiolink/tests/tcp_socket_unittest.cc | 515 ++++++++++++++++
src/lib/asiolink/tests/udp_socket_unittest.cc | 88 ++-
src/lib/asiolink/udp_endpoint.h | 11 +
src/lib/asiolink/udp_server.cc | 13 +-
src/lib/asiolink/udp_socket.h | 184 ++++---
src/lib/cache/message_cache.cc | 12 +-
src/lib/cache/message_cache.h | 3 +
src/lib/cache/message_entry.cc | 101 ++--
src/lib/cache/message_entry.h | 6 +
src/lib/cache/rrset_cache.cc | 48 +-
src/lib/cache/rrset_cache.h | 5 +-
src/lib/cache/tests/Makefile.am | 3 +
src/lib/cache/tests/message_cache_unittest.cc | 69 ++-
src/lib/cache/tests/message_entry_unittest.cc | 67 ++-
src/lib/cache/tests/rrset_cache_unittest.cc | 100 +++-
src/lib/cache/tests/testdata/message_fromWire7 | 27 +
src/lib/cache/tests/testdata/message_fromWire8 | 23 +
src/lib/cache/tests/testdata/message_fromWire9 | 25 +
src/lib/datasrc/rbtree.h | 23 +-
src/lib/datasrc/tests/rbtree_unittest.cc | 52 ++-
src/lib/log/Makefile.am | 5 +
src/lib/log/README | 376 ++++++++++++
src/lib/log/compiler/Makefile.am | 4 +-
src/lib/log/documentation.txt | 434 -------------
src/lib/python/isc/config/cfgmgr.py | 16 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 12 +-
src/lib/server_common/tests/Makefile.am | 3 +
src/lib/testutils/portconfig.h | 12 +-
src/lib/testutils/srv_test.cc | 4 +-
tests/Makefile.am | 1 +
tests/system/Makefile.am | 16 +
tests/system/README | 63 ++
tests/system/bindctl/clean.sh | 20 +
.../system/bindctl/nsx1/b10-config.db.template.in | 10 +
tests/system/bindctl/nsx1/example-normalized.db | 3 +
tests/system/bindctl/nsx1/root.db | 25 +
tests/system/bindctl/setup.sh | 26 +
tests/system/bindctl/tests.sh | 106 ++++
tests/system/cleanall.sh | 33 +
tests/system/common/default_user.csv | 1 +
tests/system/conf.sh.in | 57 ++
tests/system/glue/auth.good | 15 +
tests/system/glue/clean.sh | 23 +
tests/system/glue/example.good | 19 +
tests/system/glue/noglue.good | 14 +
tests/system/glue/nsx1/b10-config.db.in | 9 +
tests/system/glue/nsx1/com.db | 31 +
tests/system/glue/nsx1/net.db | 32 +
tests/system/glue/nsx1/root-servers.nil.db | 26 +
tests/system/glue/nsx1/root.db | 55 ++
tests/system/glue/setup.sh.in | 25 +
tests/system/glue/test.good | 19 +
tests/system/glue/tests.sh | 66 ++
tests/system/ifconfig.sh | 226 +++++++
tests/system/run.sh | 125 ++++
tests/system/runall.sh | 44 ++
tests/system/start.pl | 226 +++++++
tests/system/stop.pl | 188 ++++++
126 files changed, 6557 insertions(+), 2039 deletions(-)
delete mode 100644 src/bin/bind10/tests/bind10_test.py
create mode 100644 src/bin/bind10/tests/bind10_test.py.in
delete mode 100644 src/bin/bindctl/bindctl-source.py.in
create mode 100755 src/bin/bindctl/bindctl_main.py.in
create mode 100644 src/lib/asiolink/asiodef.cc
create mode 100644 src/lib/asiolink/asiodef.h
create mode 100644 src/lib/asiolink/asiodef.msg
create mode 100644 src/lib/asiolink/asiolink_utilities.h
delete mode 100644 src/lib/asiolink/internal/iofetch.h
create mode 100644 src/lib/asiolink/qid_gen.cc
create mode 100644 src/lib/asiolink/qid_gen.h
create mode 100644 src/lib/asiolink/tests/asiolink_utilities_unittest.cc
create mode 100644 src/lib/asiolink/tests/qid_gen_unittest.cc
create mode 100644 src/lib/asiolink/tests/recursive_query_unittest_2.cc
create mode 100644 src/lib/asiolink/tests/tcp_endpoint_unittest.cc
create mode 100644 src/lib/asiolink/tests/tcp_socket_unittest.cc
create mode 100644 src/lib/cache/tests/testdata/message_fromWire7
create mode 100644 src/lib/cache/tests/testdata/message_fromWire8
create mode 100644 src/lib/cache/tests/testdata/message_fromWire9
create mode 100644 src/lib/log/README
delete mode 100644 src/lib/log/documentation.txt
create mode 100644 tests/Makefile.am
create mode 100644 tests/system/Makefile.am
create mode 100644 tests/system/README
create mode 100755 tests/system/bindctl/clean.sh
create mode 100644 tests/system/bindctl/nsx1/b10-config.db.template.in
create mode 100644 tests/system/bindctl/nsx1/example-normalized.db
create mode 100644 tests/system/bindctl/nsx1/root.db
create mode 100755 tests/system/bindctl/setup.sh
create mode 100755 tests/system/bindctl/tests.sh
create mode 100755 tests/system/cleanall.sh
create mode 100644 tests/system/common/default_user.csv
create mode 100755 tests/system/conf.sh.in
create mode 100644 tests/system/glue/auth.good
create mode 100755 tests/system/glue/clean.sh
create mode 100644 tests/system/glue/example.good
create mode 100644 tests/system/glue/noglue.good
create mode 100644 tests/system/glue/nsx1/b10-config.db.in
create mode 100644 tests/system/glue/nsx1/com.db
create mode 100644 tests/system/glue/nsx1/net.db
create mode 100644 tests/system/glue/nsx1/root-servers.nil.db
create mode 100644 tests/system/glue/nsx1/root.db
create mode 100755 tests/system/glue/setup.sh.in
create mode 100644 tests/system/glue/test.good
create mode 100755 tests/system/glue/tests.sh
create mode 100755 tests/system/ifconfig.sh
create mode 100755 tests/system/run.sh
create mode 100755 tests/system/runall.sh
create mode 100755 tests/system/start.pl
create mode 100755 tests/system/stop.pl
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 5179548..3bbc21f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,7 +1,84 @@
- 184. [func] vorner
- Listening address and port configuration of b10-auth is the same as for
- b10-resolver now. That means, it is configured trough bindctl at runtime,
- in the Auth/listen_on list, not trough command line arguments.
+ 197. [bug] zhang likun
+ Remove expired message and rrset entries when looking up them
+ in cache, touch or remove the rrset entry in cache properly
+ when doing lookup or update.
+ (Trac #661, git 9efbe64fe3ff22bb5fba46de409ae058f199c8a7)
+
+ 196. [bug] jinmei
+ b10-auth, src/lib/datasrc: the backend of the in-memory data
+ source could not handle the root name. As a result b10-auth could
+ not work as a root server when using the in-memory data source.
+ (Trac #683, git 420ec42bd913fb83da37b26b75faae49c7957c46)
+
+ 195. [func] stephen
+ Resolver will now re-try a query over TCP if a response to a UDP
+ query has the TC bit set.
+ (Trac #499, git 4c05048ba059b79efeab53498737abe94d37ee07)
+
+ 194. [bug] vorner
+ Solved a 100% CPU usage problem after switching addresses in b10-auth
+ (and possibly, but unconfirmed, in b10-resolver). It was caused by
+ repeated reads/accepts on closed socket (the bug was in the code for a
+ long time, recent changes made it show).
+ (Trac #657, git e0863720a874d75923ea66adcfbf5b2948efb10a)
+
+ 193. [func]* jreed
+ Listen on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses
+ for b10-auth. This returns to previous behavior prior to
+ change #184. Document the listen_on configuration in manual.
+ (Trac #649, git 65a77d8fde64d464c75917a1ab9b6b3f02640ca6)
+
+ 192. [func]* jreed
+ Listen on standard domain port 53 for b10-auth and
+ b10-resolver.
+ (Trac #617, #618, git 137a6934a14cf0c5b5c065e910b8b364beb0973f)
+
+ 191. [func] jinmei
+ Imported system test framework of BIND 9. It can be run by
+ 'make systest' at the top source directory. Notes: currently it
+ doesn't work when built in a separate tree. It also requires
+ perl, an inherited dependency from the original framework.
+ Also, mainly for the purpose of tests, a new option "--pid-file"
+ was added to BoB, with which the boss process will dump its PID
+ to the specified file.
+ (Trac #606, git 6ac000df85625f5921e8895a1aafff5e4be3ba9c)
+
+ 190. [func] jelte
+ Resolver now sets random qids on outgoing queries using
+ the boost::mt19937 prng.
+ (Trac #583, git 5222b51a047d8f2352bc9f92fd022baf1681ed81)
+
+ 189. [bug] jreed
+ Do not install the log message compiler.
+ (Trac #634, git eb6441aca464980d00e3ff827cbf4195c5a7afc5)
+
+ 188. [bug] zhang likun
+ Make the rrset trust level ranking algorithm used by
+ isc::cache::MessageEntry::getRRsetTrustLevel() follow RFC2181
+ section 5.4.1.
+ (Trac #595 git 19197b5bc9f2955bd6a8ca48a2d04472ed696e81)
+
+ 187. [bug] zhang likun
+ Fix the assert error in class isc::cache::RRsetCache by adding the
+ check for empty pointer and test case for it.
+ (Trac #638, git 54e61304131965c4a1d88c9151f8697dcbb3ce12)
+
+ 186. [bug] jelte
+ b10-resolver could stop with an assertion failure on certain kinds
+ of messages (there was a problem in error message creation). This
+ fixes that.
+ (Trac #607, git 25a5f4ec755bc09b54410fcdff22691283147f32)
+
+ 185. [bug] vorner
+ Tests use port from private range (53210), lowering chance of
+ a conflict with something else (eg. running bind 10).
+ (Trac #523, git 301da7d26d41e64d87c0cf72727f3347aa61fb40)
+
+ 184. [func]* vorner
+ Listening address and port configuration of b10-auth is the same as
+ for b10-resolver now. That means, it is configured through bindctl
+ at runtime, in the Auth/listen_on list, not through command line
+ arguments.
(Trac #575, #576, git f06ce638877acf6f8e1994962bf2dbfbab029edf)
183. [bug] jerry
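
ChangeLog entry 191 above adds a --pid-file option to the boss process so
that test scripts can locate it; the boss dumps its PID to the named file.
A hypothetical consumer (the helper name and the use of SIGTERM are
assumptions, not part of this changeset) might look like this:

    # Hypothetical test helper: read the PID written via "bind10 --pid-file"
    # and ask the boss process to shut down.
    import os
    import signal

    def stop_boss(pid_file):
        with open(pid_file) as f:
            pid = int(f.read().strip())   # dump_pid() writes "<pid>\n"
        os.kill(pid, signal.SIGTERM)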
diff --git a/Makefile.am b/Makefile.am
index 9a28f20..e31a1a5 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = doc src
+SUBDIRS = doc src tests
USE_LCOV=@USE_LCOV@
LCOV=@LCOV@
GENHTML=@GENHTML@
@@ -77,6 +77,11 @@ cppcheck:
--template '{file}:{line}: check_fail: {message} ({severity},{id})' \
src
+# system tests
+systest:
+ cd tests/system; \
+ sh $(abs_srcdir)/tests/system/runall.sh
+
#### include external sources in the distributed tarball:
EXTRA_DIST = ext/asio/README
EXTRA_DIST += ext/asio/asio/local/stream_protocol.hpp
diff --git a/README b/README
index 5bc3154..b10d12e 100644
--- a/README
+++ b/README
@@ -164,8 +164,6 @@ source tree:
(Which will use the modules and configurations also from the source
tree.)
-The server will listen on port 5300 for DNS requests.
-
CONFIGURATION
Commands can be given through the bindctl tool.
diff --git a/configure.ac b/configure.ac
index d5795c4..b93fdc7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -583,6 +583,12 @@ if test "X$ac_cv_have_devpoll" = "Xyes" -a "X$GXX" = "Xyes"; then
CPPFLAGS="$CPPFLAGS -DASIO_DISABLE_DEV_POLL=1"
fi
+#
+# Perl is optional; it is used only by some of the system test scripts.
+#
+AC_PATH_PROGS(PERL, perl5 perl)
+AC_SUBST(PERL)
+
AC_ARG_ENABLE(man, [AC_HELP_STRING([--enable-man],
[regenerate man pages [default=no]])], enable_man=yes, enable_man=no)
@@ -684,6 +690,8 @@ AC_CONFIG_FILES([Makefile
src/lib/cache/tests/Makefile
src/lib/server_common/Makefile
src/lib/server_common/tests/Makefile
+ tests/Makefile
+ tests/system/Makefile
])
AC_OUTPUT([doc/version.ent
src/bin/cfgmgr/b10-cfgmgr.py
@@ -713,9 +721,10 @@ AC_OUTPUT([doc/version.ent
src/bin/stats/tests/stats_test
src/bin/bind10/bind10.py
src/bin/bind10/tests/bind10_test
+ src/bin/bind10/tests/bind10_test.py
src/bin/bind10/run_bind10.sh
src/bin/bindctl/run_bindctl.sh
- src/bin/bindctl/bindctl-source.py
+ src/bin/bindctl/bindctl_main.py
src/bin/bindctl/tests/bindctl_test
src/bin/loadzone/run_loadzone.sh
src/bin/loadzone/tests/correct/correct_test.sh
@@ -740,6 +749,10 @@ AC_OUTPUT([doc/version.ent
src/lib/cc/session_config.h.pre
src/lib/cc/tests/session_unittests_config.h
src/lib/log/tests/run_time_init_test.sh
+ tests/system/conf.sh
+ tests/system/glue/setup.sh
+ tests/system/glue/nsx1/b10-config.db
+ tests/system/bindctl/nsx1/b10-config.db.template
], [
chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
chmod +x src/bin/xfrin/run_b10-xfrin.sh
@@ -764,6 +777,7 @@ AC_OUTPUT([doc/version.ent
chmod +x src/lib/dns/gen-rdatacode.py
chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
chmod +x src/lib/log/tests/run_time_init_test.sh
+ chmod +x tests/system/conf.sh
])
AC_OUTPUT
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 0935c81..bceb40c 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -336,14 +336,6 @@ var/
</simpara>
</note>
- <note>
- <simpara>
- The development prototype of the b10-auth server listens on
- 0.0.0.0 (all interfaces) port 5300. (This is not the standard
- domain service port.)
- </simpara>
- </note>
-
<para>
To quickly get started with BIND 10, follow these steps.
</para>
@@ -397,7 +389,7 @@ var/
<listitem>
<para>Test it; for example:
- <screen>$ <userinput>dig @127.0.0.1 -p 5300 -c CH -t TXT authors.bind</userinput></screen>
+ <screen>$ <userinput>dig @127.0.0.1 -c CH -t TXT authors.bind</userinput></screen>
</para>
</listitem>
@@ -1044,11 +1036,6 @@ TODO
process.
</para>
- <note><simpara>
- This development prototype release listens on all interfaces
- and the non-standard port 5300.
- </simpara></note>
-
<section>
<title>Server Configurations</title>
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index e95dabd..d88ffb5 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -63,12 +63,12 @@
"item_optional": false,
"item_default": [
{
- "address": "::1",
- "port": 5300
+ "address": "::",
+ "port": 53
},
{
- "address": "127.0.0.1",
- "port": 5300
+ "address": "0.0.0.0",
+ "port": 53
}
],
"list_item_spec": {
@@ -87,7 +87,7 @@
"item_name": "port",
"item_type": "integer",
"item_optional": false,
- "item_default": 5300
+ "item_default": 53
}
]
}
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index bae8e4a..0356683 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: January 19, 2011
+.\" Date: March 8, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "January 19, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
b10-auth \- Authoritative DNS server
.SH "SYNOPSIS"
.HP \w'\fBb10\-auth\fR\ 'u
-\fBb10\-auth\fR [\fB\-4\fR] [\fB\-6\fR] [\fB\-a\ \fR\fB\fIaddress\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fInumber\fR\fR] [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
+\fBb10\-auth\fR [\fB\-n\fR] [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
.SH "DESCRIPTION"
.PP
The
@@ -42,55 +42,11 @@ It receives its configurations from
.PP
The arguments are as follows:
.PP
-\fB\-4\fR
-.RS 4
-Enables IPv4 only mode\&. This switch may not be used with
-\fB\-6\fR
-nor
-\fB\-a\fR\&. By default, it listens on both IPv4 and IPv6 (if capable)\&.
-.RE
-.PP
-\fB\-6\fR
-.RS 4
-Enables IPv6 only mode\&. This switch may not be used with
-\fB\-4\fR
-nor
-\fB\-a\fR\&. By default, it listens on both IPv4 and IPv6 (if capable)\&.
-.RE
-.PP
-\fB\-a \fR\fB\fIaddress\fR\fR
-.RS 4
-The IPv4 or IPv6 address to listen on\&. This switch may not be used with
-\fB\-4\fR
-nor
-\fB\-6\fR\&. The default is to listen on all addresses\&. (This is a short term workaround\&. This argument may change\&.)
-.RE
-.PP
\fB\-n\fR
.RS 4
Do not cache answers in memory\&. The default is to use the cache for faster responses\&. The cache keeps the most recent 30,000 answers (positive and negative) in memory for 30 seconds (instead of querying the data source, such as SQLite3 database, each time)\&.
.RE
.PP
-\fB\-p \fR\fB\fInumber\fR\fR
-.RS 4
-The port number it listens on\&. The default is 5300\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-The Y1 prototype runs on all interfaces and on this nonstandard port\&.
-.sp .5v
-.RE
-.RE
-.PP
\fB\-u \fR\fB\fIusername\fR\fR
.RS 4
The user name of the
@@ -114,6 +70,18 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
/usr/local/var/bind10\-devel/zone\&.sqlite3\&.
.PP
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index c9e935a..2b53394 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>January 19, 2011</date>
+ <date>March 8, 2011</date>
</refentryinfo>
<refmeta>
@@ -132,6 +132,15 @@
</para>
<para>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </para>
+
+ <para>
<varname>datasources</varname> configures data sources.
The list items include:
<varname>type</varname> to optionally choose the data source type
diff --git a/src/bin/auth/benchmarks/query_bench.cc b/src/bin/auth/benchmarks/query_bench.cc
index 7f643f3..5e69134 100644
--- a/src/bin/auth/benchmarks/query_bench.cc
+++ b/src/bin/auth/benchmarks/query_bench.cc
@@ -77,7 +77,7 @@ protected:
dummy_socket(IOSocket::getDummyUDPSocket()),
dummy_endpoint(IOEndpointPtr(IOEndpoint::create(IPPROTO_UDP,
IOAddress("192.0.2.1"),
- 5300)))
+ 53210)))
{}
public:
unsigned int run() {
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index 275ae7d..0701b94 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -122,7 +122,13 @@ main(int argc, char* argv[]) {
ModuleCCSession* config_session = NULL;
string xfrout_socket_path;
if (getenv("B10_FROM_BUILD") != NULL) {
- xfrout_socket_path = string(getenv("B10_FROM_BUILD")) + "/auth_xfrout_conn";
+ if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) {
+ xfrout_socket_path = string("B10_FROM_SOURCE_LOCALSTATEDIR") +
+ "/auth_xfrout_conn";
+ } else {
+ xfrout_socket_path = string(getenv("B10_FROM_BUILD")) +
+ "/auth_xfrout_conn";
+ }
} else {
xfrout_socket_path = UNIX_SOCKET_FILE;
}
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 54dc0c7..379342e 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -644,7 +644,7 @@ TEST_F(AuthSrvTest, queryCounterUnexpected) {
// Modify the message.
delete io_message;
endpoint = IOEndpoint::create(IPPROTO_UDP,
- IOAddress(DEFAULT_REMOTE_ADDRESS), 5300);
+ IOAddress(DEFAULT_REMOTE_ADDRESS), 53210);
io_message = new IOMessage(request_renderer.getData(),
request_renderer.getLength(),
getDummyUnknownSocket(), *endpoint);
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index f625abe..a75136b 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
.\" Title: bind10
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: July 29, 2010
+.\" Date: February 22, 2011
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "BIND10" "8" "July 29, 2010" "BIND10" "BIND10"
+.TH "BIND10" "8" "February 22, 2011" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
bind10 \- BIND 10 boss process
.SH "SYNOPSIS"
.HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-a\ \fR\fB\fIaddress\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fInumber\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-address\ \fR\fB\fIaddress\fR\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-port\ \fR\fB\fInumber\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-verbose\fR]
+\fBbind10\fR [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-verbose\fR]
.SH "DESCRIPTION"
.PP
The
@@ -32,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
.PP
The arguments are as follows:
.PP
-\fB\-a\fR \fIaddress\fR, \fB\-\-address\fR \fIaddress\fR
-.RS 4
-The IPv4 or IPv6 address for the
-\fBb10-auth\fR(8)
-daemon to listen on\&. The default is to listen on all addresses\&. (This is a short term workaround\&. This argument may change\&.)
-.RE
-.PP
\fB\-m\fR \fIfile\fR, \fB\-\-msgq\-socket\-file\fR \fIfile\fR
.RS 4
The UNIX domain socket file for the
@@ -54,28 +47,6 @@ Disables the hot\-spot caching used by the
daemon\&.
.RE
.PP
-\fB\-p\fR \fInumber\fR, \fB\-\-port\fR \fInumber\fR
-.RS 4
-The port number for the
-\fBb10-auth\fR(8)
-daemon to listen on\&. The default is 5300\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-This prototype release uses a non\-default port for domain service\&.
-.sp .5v
-.RE
-.RE
-.PP
\fB\-u\fR \fIuser\fR, \fB\-\-user\fR \fIname\fR
.RS 4
The username for
@@ -125,5 +96,5 @@ The
daemon was initially designed by Shane Kerr of ISC\&.
.SH "COPYRIGHT"
.br
-Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
+Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
.br
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
index 0709c67..9cdf6f0 100755
--- a/src/bin/bind10/bind10.py.in
+++ b/src/bin/bind10/bind10.py.in
@@ -204,7 +204,11 @@ class BoB:
msgq process listens on. If verbose is True, then the boss reports
what it is doing.
- Data path and config filename are passed trough to config manager.
+ Data path and config filename are passed trough to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specify on which port it
+ should listen.
"""
self.cc_session = None
self.ccs = None
@@ -396,12 +400,12 @@ class BoB:
Starts the configuration manager process
"""
self.log_starting("b10-cfgmgr")
- opts = ["b10-cfgmgr"]
+ args = ["b10-cfgmgr"]
if self.data_path is not None:
- opts.append("--data-path=" + self.data_path)
+ args.append("--data-path=" + self.data_path)
if self.config_filename is not None:
- opts.append("--database-filename=" + self.config_filename)
- bind_cfgd = ProcessInfo("b10-cfgmgr", opts,
+ args.append("--config-filename=" + self.config_filename)
+ bind_cfgd = ProcessInfo("b10-cfgmgr", args,
c_channel_env, uid=self.uid,
username=self.username)
self.processes[bind_cfgd.pid] = bind_cfgd
@@ -514,10 +518,10 @@ class BoB:
"""
Starts the command control process
"""
- opts = ["b10-cmdctl"]
+ args = ["b10-cmdctl"]
if self.cmdctl_port is not None:
- opts.append("--port=" + str(self.cmdctl_port))
- self.start_process("b10-cmdctl", opts, c_channel_env, self.cmdctl_port)
+ args.append("--port=" + str(self.cmdctl_port))
+ self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
def start_all_processes(self):
"""
@@ -827,18 +831,54 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
default=None)
parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
default=None, help="Port of command control")
+ parser.add_option("--pid-file", dest="pid_file", type="string",
+ default=None,
+ help="file to dump the PID of the BIND 10 process")
+
(options, args) = parser.parse_args(args)
+
if options.cmdctl_port is not None:
try:
isc.net.parse.port_parse(options.cmdctl_port)
except ValueError as e:
parser.error(e)
+
if args:
parser.print_help()
sys.exit(1)
return options
+def dump_pid(pid_file):
+ """
+ Dump the PID of the current process to the specified file. If the given
+ file is None this function does nothing. If the file already exists,
+ the existing content will be removed. If a system error happens in
+ creating or writing to the file, the corresponding exception will be
+ propagated to the caller.
+ """
+ if pid_file is None:
+ return
+ f = open(pid_file, "w")
+ f.write('%d\n' % os.getpid())
+ f.close()
+
+def unlink_pid_file(pid_file):
+ """
+ Remove the given file, which is basically expected to be the PID file
+ created by dump_pid(). The specified may or may not exist; if it
+ doesn't this function does nothing. Other system level errors in removing
+ the file will be propagated as the corresponding exception.
+ """
+ if pid_file is None:
+ return
+ try:
+ os.unlink(pid_file)
+ except OSError as error:
+ if error.errno is not errno.ENOENT:
+ raise
+
+
def main():
global options
global boss_of_bind
@@ -903,6 +943,7 @@ def main():
sys.stderr.write("[bind10] Error on startup: %s\n" % startup_result)
sys.exit(1)
sys.stdout.write("[bind10] BIND 10 started\n")
+ dump_pid(options.pid_file)
# send "bind10.boot_time" to b10-stats
time.sleep(1) # wait a second
@@ -956,6 +997,7 @@ def main():
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
boss_of_bind.shutdown()
sys.stdout.write("[bind10] BIND 10 exiting\n");
+ unlink_pid_file(options.pid_file)
sys.exit(0)
if __name__ == "__main__":
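
The dump_pid()/unlink_pid_file() pair added in this hunk is wired into
main() just above; as a standalone illustration of their documented
behaviour (the temporary path and assertions are assumptions, while the
import mirrors the unit tests below), one could run:

    # None is silently ignored, and unlinking an already-removed PID file
    # raises no error; the file itself contains "<pid>\n".
    import os
    import tempfile
    from bind10 import dump_pid, unlink_pid_file

    path = os.path.join(tempfile.mkdtemp(), "b10.pid")
    dump_pid(path)
    with open(path) as f:
        assert int(f.read()) == os.getpid()
    unlink_pid_file(path)
    unlink_pid_file(path)   # already gone: no exception
    dump_pid(None)          # no file is written
    unlink_pid_file(None)   # nothing to remove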
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index d7cf638..059f474 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -89,8 +89,9 @@
</term>
<listitem>
<para>The configuration filename to use. Can be either absolute or
- relative to data path.</para>
- <para>Defaults to b10-config.db</para>
+ relative to data path. In case it is absolute, value of data path is
+ not considered.</para>
+ <para>Defaults to b10-config.db.</para>
</listitem>
</varlistentry>
@@ -100,7 +101,9 @@
<option>--data-path</option> <replaceable>data-path</replaceable>
</term>
<listitem>
- <para>The path where bind looks for configuration files.</para>
+ <para>The path where BIND looks for configuration files, if their
+ names are not absolute. It might be used for other data files in
+ future as well.</para>
</listitem>
</varlistentry>
diff --git a/src/bin/bind10/tests/bind10_test.py b/src/bin/bind10/tests/bind10_test.py
deleted file mode 100644
index a7e8149..0000000
--- a/src/bin/bind10/tests/bind10_test.py
+++ /dev/null
@@ -1,460 +0,0 @@
-from bind10 import ProcessInfo, BoB, parse_args
-
-# XXX: environment tests are currently disabled, due to the preprocessor
-# setup that we have now complicating the environment
-
-import unittest
-import sys
-import os
-import signal
-import socket
-from isc.net.addr import IPAddr
-from isc.testutils.parse_args import TestOptParser, OptsError
-
-class TestProcessInfo(unittest.TestCase):
- def setUp(self):
- # redirect stdout to a pipe so we can check that our
- # process spawning is doing the right thing with stdout
- self.old_stdout = os.dup(sys.stdout.fileno())
- self.pipes = os.pipe()
- os.dup2(self.pipes[1], sys.stdout.fileno())
- os.close(self.pipes[1])
- # note that we use dup2() to restore the original stdout
- # to the main program ASAP in each test... this prevents
- # hangs reading from the child process (as the pipe is only
- # open in the child), and also insures nice pretty output
-
- def tearDown(self):
- # clean up our stdout munging
- os.dup2(self.old_stdout, sys.stdout.fileno())
- os.close(self.pipes[0])
-
- def test_init(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
-
-# def test_setting_env(self):
-# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
-# os.dup2(self.old_stdout, sys.stdout.fileno())
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
-# 'FOO': 'BAR' })
-
- def test_setting_null_stdout(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
- dev_null_stdout=True)
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.dev_null_stdout, True)
- self.assertEqual(os.read(self.pipes[0], 100), b"")
-
- def test_respawn(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- # wait for old process to work...
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- # respawn it
- old_pid = pi.pid
- pi.respawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- # make sure the new one started properly
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
- self.assertNotEqual(pi.pid, old_pid)
-
-class TestBoB(unittest.TestCase):
- def test_init(self):
- bob = BoB()
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, None)
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
- self.assertEqual(bob.dead_processes, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.uid, None)
- self.assertEqual(bob.username, None)
- self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
-
- def test_init_alternate_socket(self):
- bob = BoB("alt_socket_file")
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.processes, {})
- self.assertEqual(bob.dead_processes, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.uid, None)
- self.assertEqual(bob.username, None)
- self.assertEqual(bob.nocache, False)
- self.assertEqual(bob.cfg_start_auth, True)
- self.assertEqual(bob.cfg_start_resolver, False)
-
-# Class for testing the BoB start/stop components routines.
-#
-# Although testing that external processes start is outside the scope
-# of the unit test, by overriding the process start methods we can check
-# that the right processes are started depending on the configuration
-# options.
-class StartStopCheckBob(BoB):
- def __init__(self):
- BoB.__init__(self)
-
-# Set flags as to which of the overridden methods has been run.
- self.msgq = False
- self.cfgmgr = False
- self.ccsession = False
- self.auth = False
- self.resolver = False
- self.xfrout = False
- self.xfrin = False
- self.zonemgr = False
- self.stats = False
- self.cmdctl = False
- self.c_channel_env = {}
-
- def read_bind10_config(self):
- # Configuration options are set directly
- pass
-
- def start_msgq(self, c_channel_env):
- self.msgq = True
-
- def start_cfgmgr(self, c_channel_env):
- self.cfgmgr = True
-
- def start_ccsession(self, c_channel_env):
- self.ccsession = True
-
- def start_auth(self, c_channel_env):
- self.auth = True
-
- def start_resolver(self, c_channel_env):
- self.resolver = True
-
- def start_xfrout(self, c_channel_env):
- self.xfrout = True
-
- def start_xfrin(self, c_channel_env):
- self.xfrin = True
-
- def start_zonemgr(self, c_channel_env):
- self.zonemgr = True
-
- def start_stats(self, c_channel_env):
- self.stats = True
-
- def start_cmdctl(self, c_channel_env):
- self.cmdctl = True
-
- # We don't really use all of these stop_ methods. But it might turn out
- # someone would add some stop_ method to BoB and we want that one overriden
- # in case he forgets to update the tests.
- def stop_msgq(self):
- self.msgq = False
-
- def stop_cfgmgr(self):
- self.cfgmgr = False
-
- def stop_ccsession(self):
- self.ccsession = False
-
- def stop_auth(self):
- self.auth = False
-
- def stop_resolver(self):
- self.resolver = False
-
- def stop_xfrout(self):
- self.xfrout = False
-
- def stop_xfrin(self):
- self.xfrin = False
-
- def stop_zonemgr(self):
- self.zonemgr = False
-
- def stop_stats(self):
- self.stats = False
-
- def stop_cmdctl(self):
- self.cmdctl = False
-
-class TestStartStopProcessesBob(unittest.TestCase):
- """
- Check that the start_all_processes method starts the right combination
- of processes and that the right processes are started and stopped
- according to changes in configuration.
- """
- def check_started(self, bob, core, auth, resolver):
- """
- Check that the right sets of services are started. The ones that
- should be running are specified by the core, auth and resolver parameters
- (they are groups of processes, eg. auth means b10-auth, -xfrout, -xfrin
- and -zonemgr).
- """
- self.assertEqual(bob.msgq, core)
- self.assertEqual(bob.cfgmgr, core)
- self.assertEqual(bob.ccsession, core)
- self.assertEqual(bob.auth, auth)
- self.assertEqual(bob.resolver, resolver)
- self.assertEqual(bob.xfrout, auth)
- self.assertEqual(bob.xfrin, auth)
- self.assertEqual(bob.zonemgr, auth)
- self.assertEqual(bob.stats, core)
- self.assertEqual(bob.cmdctl, core)
-
- def check_preconditions(self, bob):
- self.check_started(bob, False, False, False)
-
- def check_started_none(self, bob):
- """
- Check that the situation is according to configuration where no servers
- should be started. Some processes still need to be running.
- """
- self.check_started(bob, True, False, False)
-
- def check_started_both(self, bob):
- """
- Check the situation is according to configuration where both servers
- (auth and resolver) are enabled.
- """
- self.check_started(bob, True, True, True)
-
- def check_started_auth(self, bob):
- """
- Check the set of processes needed to run auth only is started.
- """
- self.check_started(bob, True, True, False)
-
- def check_started_resolver(self, bob):
- """
- Check the set of processes needed to run resolver only is started.
- """
- self.check_started(bob, True, False, True)
-
- # Checks the processes started when starting neither auth nor resolver
- # is specified.
- def test_start_none(self):
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
- self.check_started_none(bob)
-
- # Checks the processes started when starting only the auth process
- def test_start_auth(self):
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
-
- self.check_started_auth(bob)
-
- # Checks the processes started when starting only the resolver process
- def test_start_resolver(self):
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_resolver(bob)
-
- # Checks the processes started when starting both auth and resolver process
- def test_start_both(self):
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes and check what was started
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
-
- self.check_started_both(bob)
-
- def test_config_start(self):
- """
- Test that the configuration starts and stops processes according
- to configuration changes.
- """
-
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes (nothing much should be started, as in
- # test_start_none)
- bob.cfg_start_auth = False
- bob.cfg_start_resolver = False
-
- bob.start_all_processes()
- bob.runnable = True
- self.check_started_none(bob)
-
- # Enable both at once
- bob.config_handler({'start_auth': True, 'start_resolver': True})
- self.check_started_both(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_both(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler({'start_auth': True, 'start_resolver': True})
- self.check_started_both(bob)
-
- # Turn them both off again
- bob.config_handler({'start_auth': False, 'start_resolver': False})
- self.check_started_none(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_none(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler({'start_auth': False, 'start_resolver': False})
- self.check_started_none(bob)
-
- # Start and stop auth separately
- bob.config_handler({'start_auth': True})
- self.check_started_auth(bob)
-
- bob.config_handler({'start_auth': False})
- self.check_started_none(bob)
-
- # Start and stop resolver separately
- bob.config_handler({'start_resolver': True})
- self.check_started_resolver(bob)
-
- bob.config_handler({'start_resolver': False})
- self.check_started_none(bob)
-
- # Alternate
- bob.config_handler({'start_auth': True})
- self.check_started_auth(bob)
-
- bob.config_handler({'start_auth': False, 'start_resolver': True})
- self.check_started_resolver(bob)
-
- bob.config_handler({'start_auth': True, 'start_resolver': False})
- self.check_started_auth(bob)
-
- def test_config_start_once(self):
- """
- Tests that a process is started only once.
- """
- # Create BoB and ensure correct initialization
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- # Start processes (both)
- bob.cfg_start_auth = True
- bob.cfg_start_resolver = True
-
- bob.start_all_processes()
- bob.runnable = True
- self.check_started_both(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- # Send again we want to start them. Should not do it, as they are.
- bob.config_handler({'start_auth': True})
- bob.config_handler({'start_resolver': True})
-
- def test_config_not_started_early(self):
- """
- Test that processes are not started by the config handler before
- startup.
- """
- bob = StartStopCheckBob()
- self.check_preconditions(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- bob.config_handler({'start_auth': True, 'start_resolver': True})
-
-class TestParseArgs(unittest.TestCase):
- """
- This tests parsing of arguments of the bind10 master process.
- """
- #TODO: Write tests for the original parsing, bad options, etc.
- def test_no_opts(self):
- """
- Test correct default values when no options are passed.
- """
- options = parse_args([], TestOptParser)
- self.assertEqual(None, options.data_path)
- self.assertEqual(None, options.config_file)
- self.assertEqual(None, options.cmdctl_port)
-
- def test_data_path(self):
- """
- Test it can parse the data path.
- """
- options = parse_args(['-p', '/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
- options = parse_args(['--data-path=/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
-
- def test_config_filename(self):
- """
- Test it can parse the config switch.
- """
- options = parse_args(['-c', 'config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
- options = parse_args(['--config-file=config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
-
- def test_cmdctl_port(self):
- """
- Test it can parse the command control port.
- """
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
- TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
- TestOptParser)
- options = parse_args(['--cmdctl-port=1234'], TestOptParser)
- self.assertEqual(1234, options.cmdctl_port)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
new file mode 100644
index 0000000..a53df1a
--- /dev/null
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -0,0 +1,515 @@
+from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file
+
+# XXX: environment tests are currently disabled, due to the preprocessor
+# setup that we have now complicating the environment
+
+import unittest
+import sys
+import os
+import signal
+import socket
+from isc.net.addr import IPAddr
+from isc.testutils.parse_args import TestOptParser, OptsError
+
+class TestProcessInfo(unittest.TestCase):
+ def setUp(self):
+ # redirect stdout to a pipe so we can check that our
+ # process spawning is doing the right thing with stdout
+ self.old_stdout = os.dup(sys.stdout.fileno())
+ self.pipes = os.pipe()
+ os.dup2(self.pipes[1], sys.stdout.fileno())
+ os.close(self.pipes[1])
+ # note that we use dup2() to restore the original stdout
+ # to the main program ASAP in each test... this prevents
+ # hangs reading from the child process (as the pipe is only
+ # open in the child), and also insures nice pretty output
+
+ def tearDown(self):
+ # clean up our stdout munging
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ os.close(self.pipes[0])
+
+ def test_init(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+
+# def test_setting_env(self):
+# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
+# os.dup2(self.old_stdout, sys.stdout.fileno())
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
+# 'FOO': 'BAR' })
+
+ def test_setting_null_stdout(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
+ dev_null_stdout=True)
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.dev_null_stdout, True)
+ self.assertEqual(os.read(self.pipes[0], 100), b"")
+
+ def test_respawn(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ # wait for old process to work...
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ # respawn it
+ old_pid = pi.pid
+ pi.respawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ # make sure the new one started properly
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+ self.assertNotEqual(pi.pid, old_pid)
+
+class TestBoB(unittest.TestCase):
+ def test_init(self):
+ bob = BoB()
+ self.assertEqual(bob.verbose, False)
+ self.assertEqual(bob.msgq_socket_file, None)
+ self.assertEqual(bob.cc_session, None)
+ self.assertEqual(bob.ccs, None)
+ self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.dead_processes, {})
+ self.assertEqual(bob.runnable, False)
+ self.assertEqual(bob.uid, None)
+ self.assertEqual(bob.username, None)
+ self.assertEqual(bob.nocache, False)
+ self.assertEqual(bob.cfg_start_auth, True)
+ self.assertEqual(bob.cfg_start_resolver, False)
+
+ def test_init_alternate_socket(self):
+ bob = BoB("alt_socket_file")
+ self.assertEqual(bob.verbose, False)
+ self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
+ self.assertEqual(bob.cc_session, None)
+ self.assertEqual(bob.ccs, None)
+ self.assertEqual(bob.processes, {})
+ self.assertEqual(bob.dead_processes, {})
+ self.assertEqual(bob.runnable, False)
+ self.assertEqual(bob.uid, None)
+ self.assertEqual(bob.username, None)
+ self.assertEqual(bob.nocache, False)
+ self.assertEqual(bob.cfg_start_auth, True)
+ self.assertEqual(bob.cfg_start_resolver, False)
+
+# Class for testing the BoB start/stop components routines.
+#
+# Although testing that external processes start is outside the scope
+# of the unit test, by overriding the process start methods we can check
+# that the right processes are started depending on the configuration
+# options.
+class StartStopCheckBob(BoB):
+ def __init__(self):
+ BoB.__init__(self)
+
+# Set flags recording which of the overridden methods have been run.
+ self.msgq = False
+ self.cfgmgr = False
+ self.ccsession = False
+ self.auth = False
+ self.resolver = False
+ self.xfrout = False
+ self.xfrin = False
+ self.zonemgr = False
+ self.stats = False
+ self.cmdctl = False
+ self.c_channel_env = {}
+
+ def read_bind10_config(self):
+ # Configuration options are set directly
+ pass
+
+ def start_msgq(self, c_channel_env):
+ self.msgq = True
+
+ def start_cfgmgr(self, c_channel_env):
+ self.cfgmgr = True
+
+ def start_ccsession(self, c_channel_env):
+ self.ccsession = True
+
+ def start_auth(self, c_channel_env):
+ self.auth = True
+
+ def start_resolver(self, c_channel_env):
+ self.resolver = True
+
+ def start_xfrout(self, c_channel_env):
+ self.xfrout = True
+
+ def start_xfrin(self, c_channel_env):
+ self.xfrin = True
+
+ def start_zonemgr(self, c_channel_env):
+ self.zonemgr = True
+
+ def start_stats(self, c_channel_env):
+ self.stats = True
+
+ def start_cmdctl(self, c_channel_env):
+ self.cmdctl = True
+
+ # We don't really use all of these stop_ methods. But if someone adds
+ # a stop_ method to BoB, we want it overridden here in case they forget
+ # to update the tests.
+ def stop_msgq(self):
+ self.msgq = False
+
+ def stop_cfgmgr(self):
+ self.cfgmgr = False
+
+ def stop_ccsession(self):
+ self.ccsession = False
+
+ def stop_auth(self):
+ self.auth = False
+
+ def stop_resolver(self):
+ self.resolver = False
+
+ def stop_xfrout(self):
+ self.xfrout = False
+
+ def stop_xfrin(self):
+ self.xfrin = False
+
+ def stop_zonemgr(self):
+ self.zonemgr = False
+
+ def stop_stats(self):
+ self.stats = False
+
+ def stop_cmdctl(self):
+ self.cmdctl = False
+
+class TestStartStopProcessesBob(unittest.TestCase):
+ """
+ Check that the start_all_processes method starts the right combination
+ of processes and that the right processes are started and stopped
+ according to changes in configuration.
+ """
+ def check_started(self, bob, core, auth, resolver):
+ """
+ Check that the right sets of services are started. The ones that
+ should be running are specified by the core, auth and resolver parameters
+ (they are groups of processes, e.g. auth means b10-auth, -xfrout, -xfrin
+ and -zonemgr).
+ """
+ self.assertEqual(bob.msgq, core)
+ self.assertEqual(bob.cfgmgr, core)
+ self.assertEqual(bob.ccsession, core)
+ self.assertEqual(bob.auth, auth)
+ self.assertEqual(bob.resolver, resolver)
+ self.assertEqual(bob.xfrout, auth)
+ self.assertEqual(bob.xfrin, auth)
+ self.assertEqual(bob.zonemgr, auth)
+ self.assertEqual(bob.stats, core)
+ self.assertEqual(bob.cmdctl, core)
+
+ def check_preconditions(self, bob):
+ self.check_started(bob, False, False, False)
+
+ def check_started_none(self, bob):
+ """
+ Check that the situation matches a configuration where no servers
+ should be started. Some core processes still need to be running.
+ """
+ self.check_started(bob, True, False, False)
+
+ def check_started_both(self, bob):
+ """
+ Check that the situation matches a configuration where both servers
+ (auth and resolver) are enabled.
+ """
+ self.check_started(bob, True, True, True)
+
+ def check_started_auth(self, bob):
+ """
+ Check the set of processes needed to run auth only is started.
+ """
+ self.check_started(bob, True, True, False)
+
+ def check_started_resolver(self, bob):
+ """
+ Check the set of processes needed to run resolver only is started.
+ """
+ self.check_started(bob, True, False, True)
+
+ # Checks the processes started when neither auth nor resolver is
+ # enabled.
+ def test_start_none(self):
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes and check what was started
+ bob.cfg_start_auth = False
+ bob.cfg_start_resolver = False
+
+ bob.start_all_processes()
+ self.check_started_none(bob)
+
+ # Checks the processes started when starting only the auth process
+ def test_start_auth(self):
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes and check what was started
+ bob.cfg_start_auth = True
+ bob.cfg_start_resolver = False
+
+ bob.start_all_processes()
+
+ self.check_started_auth(bob)
+
+ # Checks the processes started when starting only the resolver process
+ def test_start_resolver(self):
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes and check what was started
+ bob.cfg_start_auth = False
+ bob.cfg_start_resolver = True
+
+ bob.start_all_processes()
+
+ self.check_started_resolver(bob)
+
+ # Checks the processes started when starting both the auth and resolver processes
+ def test_start_both(self):
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes and check what was started
+ bob.cfg_start_auth = True
+ bob.cfg_start_resolver = True
+
+ bob.start_all_processes()
+
+ self.check_started_both(bob)
+
+ def test_config_start(self):
+ """
+ Test that the configuration starts and stops processes according
+ to configuration changes.
+ """
+
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes (nothing much should be started, as in
+ # test_start_none)
+ bob.cfg_start_auth = False
+ bob.cfg_start_resolver = False
+
+ bob.start_all_processes()
+ bob.runnable = True
+ self.check_started_none(bob)
+
+ # Enable both at once
+ bob.config_handler({'start_auth': True, 'start_resolver': True})
+ self.check_started_both(bob)
+
+ # Not touched by empty change
+ bob.config_handler({})
+ self.check_started_both(bob)
+
+ # Not touched by change to the same configuration
+ bob.config_handler({'start_auth': True, 'start_resolver': True})
+ self.check_started_both(bob)
+
+ # Turn them both off again
+ bob.config_handler({'start_auth': False, 'start_resolver': False})
+ self.check_started_none(bob)
+
+ # Not touched by empty change
+ bob.config_handler({})
+ self.check_started_none(bob)
+
+ # Not touched by change to the same configuration
+ bob.config_handler({'start_auth': False, 'start_resolver': False})
+ self.check_started_none(bob)
+
+ # Start and stop auth separately
+ bob.config_handler({'start_auth': True})
+ self.check_started_auth(bob)
+
+ bob.config_handler({'start_auth': False})
+ self.check_started_none(bob)
+
+ # Start and stop resolver separately
+ bob.config_handler({'start_resolver': True})
+ self.check_started_resolver(bob)
+
+ bob.config_handler({'start_resolver': False})
+ self.check_started_none(bob)
+
+ # Alternate
+ bob.config_handler({'start_auth': True})
+ self.check_started_auth(bob)
+
+ bob.config_handler({'start_auth': False, 'start_resolver': True})
+ self.check_started_resolver(bob)
+
+ bob.config_handler({'start_auth': True, 'start_resolver': False})
+ self.check_started_auth(bob)
+
+ def test_config_start_once(self):
+ """
+ Tests that a process is started only once.
+ """
+ # Create BoB and ensure correct initialization
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ # Start processes (both)
+ bob.cfg_start_auth = True
+ bob.cfg_start_resolver = True
+
+ bob.start_all_processes()
+ bob.runnable = True
+ self.check_started_both(bob)
+
+ bob.start_auth = lambda: self.fail("Started auth again")
+ bob.start_xfrout = lambda: self.fail("Started xfrout again")
+ bob.start_xfrin = lambda: self.fail("Started xfrin again")
+ bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
+ bob.start_resolver = lambda: self.fail("Started resolver again")
+
+ # Ask again to start them. It should not, as they are already running.
+ bob.config_handler({'start_auth': True})
+ bob.config_handler({'start_resolver': True})
+
+ def test_config_not_started_early(self):
+ """
+ Test that processes are not started by the config handler before
+ startup.
+ """
+ bob = StartStopCheckBob()
+ self.check_preconditions(bob)
+
+ bob.start_auth = lambda: self.fail("Started auth again")
+ bob.start_xfrout = lambda: self.fail("Started xfrout again")
+ bob.start_xfrin = lambda: self.fail("Started xfrin again")
+ bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
+ bob.start_resolver = lambda: self.fail("Started resolver again")
+
+ bob.config_handler({'start_auth': True, 'start_resolver': True})
+
+class TestParseArgs(unittest.TestCase):
+ """
+ This tests parsing of arguments of the bind10 master process.
+ """
+ #TODO: Write tests for the original parsing, bad options, etc.
+ def test_no_opts(self):
+ """
+ Test correct default values when no options are passed.
+ """
+ options = parse_args([], TestOptParser)
+ self.assertEqual(None, options.data_path)
+ self.assertEqual(None, options.config_file)
+ self.assertEqual(None, options.cmdctl_port)
+
+ def test_data_path(self):
+ """
+ Test it can parse the data path.
+ """
+ self.assertRaises(OptsError, parse_args, ['-p'], TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--data-path'],
+ TestOptParser)
+ options = parse_args(['-p', '/data/path'], TestOptParser)
+ self.assertEqual('/data/path', options.data_path)
+ options = parse_args(['--data-path=/data/path'], TestOptParser)
+ self.assertEqual('/data/path', options.data_path)
+
+ def test_config_filename(self):
+ """
+ Test it can parse the config switch.
+ """
+ self.assertRaises(OptsError, parse_args, ['-c'], TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--config-file'],
+ TestOptParser)
+ options = parse_args(['-c', 'config-file'], TestOptParser)
+ self.assertEqual('config-file', options.config_file)
+ options = parse_args(['--config-file=config-file'], TestOptParser)
+ self.assertEqual('config-file', options.config_file)
+
+ def test_cmdctl_port(self):
+ """
+ Test it can parse the command control port.
+ """
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
+ TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
+ TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port'],
+ TestOptParser)
+ options = parse_args(['--cmdctl-port=1234'], TestOptParser)
+ self.assertEqual(1234, options.cmdctl_port)
+
+class TestPIDFile(unittest.TestCase):
+ def setUp(self):
+ self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
+ if os.path.exists(self.pid_file):
+ os.unlink(self.pid_file)
+
+ def tearDown(self):
+ if os.path.exists(self.pid_file):
+ os.unlink(self.pid_file)
+
+ def check_pid_file(self):
+ # dump PID to the file, and confirm the content is correct
+ dump_pid(self.pid_file)
+ my_pid = os.getpid()
+ self.assertEqual(my_pid, int(open(self.pid_file, "r").read()))
+
+ def test_dump_pid(self):
+ self.check_pid_file()
+
+ # make sure any existing content will be removed
+ open(self.pid_file, "w").write('dummy data\n')
+ self.check_pid_file()
+
+ def test_unlink_pid_file_notexist(self):
+ dummy_data = 'dummy_data\n'
+ open(self.pid_file, "w").write(dummy_data)
+ unlink_pid_file("no_such_pid_file")
+ # the file specified for unlink_pid_file doesn't exist,
+ # and the original content of the file should be intact.
+ self.assertEqual(dummy_data, open(self.pid_file, "r").read())
+
+ def test_dump_pid_with_none(self):
+ # Check the behavior of dump_pid() and unlink_pid_file() with None.
+ # This should be no-op.
+ dump_pid(None)
+ self.assertFalse(os.path.exists(self.pid_file))
+
+ dummy_data = 'dummy_data\n'
+ open(self.pid_file, "w").write(dummy_data)
+ unlink_pid_file(None)
+ self.assertEqual(dummy_data, open(self.pid_file, "r").read())
+
+ def test_dump_pid_failure(self):
+ # the attempt to open file will fail, which should result in exception.
+ self.assertRaises(IOError, dump_pid,
+ 'nonexistent_dir' + os.sep + 'bind10.pid')
+
+if __name__ == '__main__':
+ unittest.main()
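
The setUp()/tearDown() pair in TestProcessInfo above captures a child process's stdout by pointing the test runner's stdout at a pipe with os.dup2() and restoring it as soon as possible. A minimal, self-contained sketch of that pattern in plain Python (the capture_child_stdout helper is illustrative only, not part of the patch):

    import os
    import subprocess
    import sys

    def capture_child_stdout(args):
        # Keep the real stdout, then point fd 1 at the write end of a pipe
        # so the child inherits it.
        saved = os.dup(sys.stdout.fileno())
        read_end, write_end = os.pipe()
        os.dup2(write_end, sys.stdout.fileno())
        os.close(write_end)
        try:
            subprocess.Popen(args).wait()
        finally:
            # Restore stdout immediately; once our copy of the write end is
            # gone, the read below cannot hang waiting for more data.
            os.dup2(saved, sys.stdout.fileno())
            os.close(saved)
        data = os.read(read_end, 100)
        os.close(read_end)
        return data

    # capture_child_stdout(['/bin/echo', 'foo']) == b'foo\n'
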
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index e95af78..2f412ec 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,12 +5,13 @@ man_MANS = bindctl.1
EXTRA_DIST = $(man_MANS) bindctl.xml
-python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py mycollections.py
+python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
+ mycollections.py
pythondir = $(pyexecdir)/bindctl
bindctldir = $(pkgdatadir)
-CLEANFILES = bindctl
+CLEANFILES = bindctl bindctl_main.pyc
if ENABLE_MAN
@@ -19,8 +20,8 @@ bindctl.1: bindctl.xml
endif
-bindctl: bindctl-source.py
+bindctl: bindctl_main.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl-source.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bindctl_main.py >$@
chmod a+x $@
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 683dda9..83dab25 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -87,7 +87,8 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
class BindCmdInterpreter(Cmd):
"""simple bindctl example."""
- def __init__(self, server_port = 'localhost:8080', pem_file = None):
+ def __init__(self, server_port='localhost:8080', pem_file=None,
+ csv_file_dir=None):
Cmd.__init__(self)
self.location = ""
self.prompt_end = '> '
@@ -103,7 +104,12 @@ class BindCmdInterpreter(Cmd):
ca_certs=pem_file)
self.session_id = self._get_session_id()
self.config_data = None
-
+ if csv_file_dir is not None:
+ self.csv_file_dir = csv_file_dir
+ else:
+ self.csv_file_dir = pwd.getpwnam(getpass.getuser()).pw_dir + \
+ os.sep + '.bind10' + os.sep
+
def _get_session_id(self):
'''Generate one session id for the connection. '''
rand = os.urandom(16)
@@ -175,9 +181,7 @@ class BindCmdInterpreter(Cmd):
time, username and password saved in 'default_user.csv' will be
used first.
'''
- csv_file_dir = pwd.getpwnam(getpass.getuser()).pw_dir
- csv_file_dir += os.sep + '.bind10' + os.sep
- users = self._get_saved_user_info(csv_file_dir, CSV_FILE_NAME)
+ users = self._get_saved_user_info(self.csv_file_dir, CSV_FILE_NAME)
for row in users:
param = {'username': row[0], 'password' : row[1]}
try:
@@ -211,7 +215,8 @@ class BindCmdInterpreter(Cmd):
raise FailToLogin()
if response.status == http.client.OK:
- self._save_user_info(username, passwd, csv_file_dir, CSV_FILE_NAME)
+ self._save_user_info(username, passwd, self.csv_file_dir,
+ CSV_FILE_NAME)
return True
def _update_commands(self):
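
With the change above, BindCmdInterpreter computes its CSV directory once in the constructor: an explicit csv_file_dir argument wins, otherwise it falls back to a ".bind10" directory under the current user's home directory. A standalone sketch of that default (the default_csv_dir helper is illustrative, not part of bindcmd.py):

    import getpass
    import os
    import pwd

    def default_csv_dir(csv_file_dir=None):
        # An explicit directory wins; otherwise use ~/.bind10/ for the
        # current user, mirroring the constructor logic above.
        if csv_file_dir is not None:
            return csv_file_dir
        home = pwd.getpwnam(getpass.getuser()).pw_dir
        return home + os.sep + '.bind10' + os.sep

    # default_csv_dir()                        -> e.g. '/home/alice/.bind10/'
    # default_csv_dir('/something/different/') -> '/something/different/'
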
diff --git a/src/bin/bindctl/bindctl-source.py.in b/src/bin/bindctl/bindctl-source.py.in
deleted file mode 100644
index 080c3bc..0000000
--- a/src/bin/bindctl/bindctl-source.py.in
+++ /dev/null
@@ -1,135 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2009 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""This is the main calling class for the bindctl configuration and
- command tool. It sets up a command interpreter and runs that."""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-
-from bindctl.moduleinfo import *
-from bindctl.bindcmd import *
-import pprint
-from optparse import OptionParser, OptionValueError
-import isc.util.process
-
-isc.util.process.rename()
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bindctl 20110217 (BIND 10 @PACKAGE_VERSION@)"
-
-DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Boss/start_auth', 'Recurse/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
-
-def prepare_config_commands(tool):
- '''Prepare fixed commands for local configuration editing'''
- module = ModuleInfo(name = CONFIG_MODULE_NAME, desc = "Configuration commands.")
- cmd = CommandInfo(name = "show", desc = "Show configuration.")
- param = ParamInfo(name = "argument", type = "string", optional=True, desc = "If you specify the argument 'all' (before the identifier), recursively show all child elements for the given identifier.")
- cmd.add_param(param)
- param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "show_json", desc = "Show full configuration in JSON format.")
- param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
- param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
- param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "set", desc = "Set a configuration value.")
- param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- param = ParamInfo(name = "value", type = "string", optional=False, desc = "Specifies a value to set. It must be in correct JSON format and complete.")
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "unset", desc = "Unset a configuration value (i.e. revert to the default, if any).")
- param = ParamInfo(name = "identifier", type = "string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "diff", desc = "Show all local changes that have not been committed.")
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "revert", desc = "Revert all local changes.")
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "commit", desc = "Commit all local changes.")
- module.add_command(cmd)
-
- cmd = CommandInfo(name = "go", desc = "Go to a specific configuration part.")
- param = ParamInfo(name = "identifier", type="string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
- cmd.add_param(param)
- module.add_command(cmd)
-
- tool.add_module_info(module)
-
-def check_port(option, opt_str, value, parser):
- if (value < 0) or (value > 65535):
- raise OptionValueError('%s requires a port number (0-65535)' % opt_str)
- parser.values.port = value
-
-def check_addr(option, opt_str, value, parser):
- ipstr = value
- ip_family = socket.AF_INET
- if (ipstr.find(':') != -1):
- ip_family = socket.AF_INET6
-
- try:
- socket.inet_pton(ip_family, ipstr)
- except:
- raise OptionValueError("%s invalid ip address" % ipstr)
-
- parser.values.addr = value
-
-def set_bindctl_options(parser):
- parser.add_option('-p', '--port', dest = 'port', type = 'int',
- action = 'callback', callback=check_port,
- default = '8080', help = 'port for cmdctl of bind10')
-
- parser.add_option('-a', '--address', dest = 'addr', type = 'string',
- action = 'callback', callback=check_addr,
- default = '127.0.0.1', help = 'IP address for cmdctl of bind10')
-
- parser.add_option('-c', '--certificate-chain', dest = 'cert_chain',
- type = 'string', action = 'store',
- help = 'PEM formatted server certificate validation chain file')
-
-if __name__ == '__main__':
- parser = OptionParser(version = VERSION)
- set_bindctl_options(parser)
- (options, args) = parser.parse_args()
- server_addr = options.addr + ':' + str(options.port)
- tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain)
- prepare_config_commands(tool)
- tool.run()
-
-
diff --git a/src/bin/bindctl/bindctl.xml b/src/bin/bindctl/bindctl.xml
index 98d65f9..eff1de2 100644
--- a/src/bin/bindctl/bindctl.xml
+++ b/src/bin/bindctl/bindctl.xml
@@ -51,6 +51,7 @@
<arg><option>--address <replaceable>address</replaceable></option></arg>
<arg><option>--help</option></arg>
<arg><option>--certificate-chain <replaceable>file</replaceable></option></arg>
+ <arg><option>--csv-file-dir <replaceable>file</replaceable></option></arg>
<arg><option>--port <replaceable>number</replaceable></option></arg>
<arg><option>--version</option></arg>
</cmdsynopsis>
@@ -110,6 +111,22 @@
</varlistentry>
<varlistentry>
+ <term>
+ <option>--csv-file-dir</option> <replaceable>file</replaceable>
+ </term>
+
+ <listitem>
+ <para>
+ The directory name in which the user/password CSV file
+ is stored (see AUTHENTICATION).
+ By default this option doesn't have any value,
+ in which case the ".bind10" directory under the user's
+ home directory will be used.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-h</option>,
<option>--help</option></term>
<listitem><para>
@@ -148,8 +165,10 @@
<para>
The tool will authenticate using a username and password.
On the first successful login, it will save the details to
- <filename>~/.bind10/default_user.csv</filename>
+ a comma-separated-value (CSV) file
which will be used for later uses of <command>bindctl</command>.
+ The file name is <filename>default_user.csv</filename>
+ located under the directory specified by the <option>--csv-file-dir</option> option.
</para>
<!-- TODO: mention HTTPS? -->
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
new file mode 100755
index 0000000..01307e9
--- /dev/null
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -0,0 +1,138 @@
+#!@PYTHON@
+
+# Copyright (C) 2009 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""This is the main calling class for the bindctl configuration and
+ command tool. It sets up a command interpreter and runs that."""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+
+from bindctl.moduleinfo import *
+from bindctl.bindcmd import *
+import pprint
+from optparse import OptionParser, OptionValueError
+import isc.util.process
+
+isc.util.process.rename()
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bindctl 20110217 (BIND 10 @PACKAGE_VERSION@)"
+
+DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Boss/start_auth', 'Recurse/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
+
+def prepare_config_commands(tool):
+ '''Prepare fixed commands for local configuration editing'''
+ module = ModuleInfo(name = CONFIG_MODULE_NAME, desc = "Configuration commands.")
+ cmd = CommandInfo(name = "show", desc = "Show configuration.")
+ param = ParamInfo(name = "argument", type = "string", optional=True, desc = "If you specify the argument 'all' (before the identifier), recursively show all child elements for the given identifier.")
+ cmd.add_param(param)
+ param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "show_json", desc = "Show full configuration in JSON format.")
+ param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+ param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+ param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "set", desc = "Set a configuration value.")
+ param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ param = ParamInfo(name = "value", type = "string", optional=False, desc = "Specifies a value to set. It must be in correct JSON format and complete.")
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "unset", desc = "Unset a configuration value (i.e. revert to the default, if any).")
+ param = ParamInfo(name = "identifier", type = "string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "diff", desc = "Show all local changes that have not been committed.")
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "revert", desc = "Revert all local changes.")
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "commit", desc = "Commit all local changes.")
+ module.add_command(cmd)
+
+ cmd = CommandInfo(name = "go", desc = "Go to a specific configuration part.")
+ param = ParamInfo(name = "identifier", type="string", optional=False, desc = DEFAULT_IDENTIFIER_DESC)
+ cmd.add_param(param)
+ module.add_command(cmd)
+
+ tool.add_module_info(module)
+
+def check_port(option, opt_str, value, parser):
+ if (value < 0) or (value > 65535):
+ raise OptionValueError('%s requires a port number (0-65535)' % opt_str)
+ parser.values.port = value
+
+def check_addr(option, opt_str, value, parser):
+ ipstr = value
+ ip_family = socket.AF_INET
+ if (ipstr.find(':') != -1):
+ ip_family = socket.AF_INET6
+
+ try:
+ socket.inet_pton(ip_family, ipstr)
+ except:
+ raise OptionValueError("%s invalid ip address" % ipstr)
+
+ parser.values.addr = value
+
+def set_bindctl_options(parser):
+ parser.add_option('-p', '--port', dest='port', type='int',
+ action='callback', callback=check_port,
+ default='8080', help='port for cmdctl of bind10')
+
+ parser.add_option('-a', '--address', dest='addr', type='string',
+ action='callback', callback=check_addr,
+ default='127.0.0.1', help='IP address for cmdctl of bind10')
+
+ parser.add_option('-c', '--certificate-chain', dest='cert_chain',
+ type='string', action='store',
+ help='PEM formatted server certificate validation chain file')
+
+ parser.add_option('--csv-file-dir', dest='csv_file_dir', type='string',
+ default=None, action='store',
+ help='Directory to store the password CSV file')
+
+if __name__ == '__main__':
+ parser = OptionParser(version = VERSION)
+ set_bindctl_options(parser)
+ (options, args) = parser.parse_args()
+ server_addr = options.addr + ':' + str(options.port)
+ tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
+ csv_file_dir=options.csv_file_dir)
+ prepare_config_commands(tool)
+ tool.run()
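
The option set above can be exercised the same way the unit tests do, by handing set_bindctl_options() any OptionParser-compatible parser. A short usage sketch (the /tmp path is just an example value):

    from optparse import OptionParser
    from bindctl_main import set_bindctl_options

    parser = OptionParser()
    set_bindctl_options(parser)

    opts, _ = parser.parse_args(['-a', '127.0.0.1', '-p', '8080',
                                 '--csv-file-dir', '/tmp/bind10-csv/'])
    assert opts.addr == '127.0.0.1'
    assert opts.port == 8080
    assert opts.csv_file_dir == '/tmp/bind10-csv/'
    # A port outside 0-65535 or a malformed address makes the check_port or
    # check_addr callback raise OptionValueError, which OptionParser reports
    # as a usage error.
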
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index 8a7a623..d2bb90f 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -11,6 +11,6 @@ if ENABLE_PYTHON_COVERAGE
endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
- env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_srcdir)/src/bin \
+ env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
done
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 490dd7a..8a46575 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -17,8 +17,13 @@
import unittest
import isc.cc.data
import os
+import pwd
+import getpass
+from optparse import OptionParser
from isc.config.config_data import ConfigData, MultiConfigData
from isc.config.module_spec import ModuleSpec
+from isc.testutils.parse_args import TestOptParser, OptsError
+from bindctl_main import set_bindctl_options
from bindctl import cmdparse
from bindctl import bindcmd
from bindctl.moduleinfo import *
@@ -332,13 +337,6 @@ class TestConfigCommands(unittest.TestCase):
cmd = cmdparse.BindCmdParse("config set identifier=\"foo/a_list\" value=[1]")
self.assertRaises(isc.cc.data.DataTypeError, self.tool.apply_config_cmd, cmd)
-
-
-
-class FakeBindCmdInterpreter(bindcmd.BindCmdInterpreter):
- def __init__(self):
- pass
-
class TestBindCmdInterpreter(unittest.TestCase):
def _create_invalid_csv_file(self, csvfilename):
@@ -349,9 +347,22 @@ class TestBindCmdInterpreter(unittest.TestCase):
writer.writerow(['name2'])
csvfile.close()
+ def test_csv_file_dir(self):
+ # Checking default value
+ if "HOME" in os.environ:
+ home_dir = os.environ["HOME"]
+ else:
+ home_dir = pwd.getpwnam(getpass.getuser()).pw_dir
+ self.assertEqual(home_dir + os.sep + '.bind10' + os.sep,
+ bindcmd.BindCmdInterpreter().csv_file_dir)
+
+ new_csv_dir = '/something/different/'
+ custom_cmd = bindcmd.BindCmdInterpreter(csv_file_dir=new_csv_dir)
+ self.assertEqual(new_csv_dir, custom_cmd.csv_file_dir)
+
def test_get_saved_user_info(self):
- cmd = FakeBindCmdInterpreter()
- users = cmd._get_saved_user_info('/notexist', 'cvs_file.cvs')
+ cmd = bindcmd.BindCmdInterpreter()
+ users = cmd._get_saved_user_info('/notexist', 'csv_file.csv')
self.assertEqual([], users)
csvfilename = 'csv_file.csv'
@@ -360,6 +371,25 @@ class TestBindCmdInterpreter(unittest.TestCase):
self.assertEqual([], users)
os.remove(csvfilename)
+
+class TestCommandLineOptions(unittest.TestCase):
+ def setUp(self):
+ self.parser = TestOptParser()
+ set_bindctl_options(self.parser)
+
+ def test_csv_file_dir(self):
+ # by default the option is "undefined"
+ (options, _) = self.parser.parse_args([])
+ self.assertEqual(None, options.csv_file_dir)
+
+ # specify the option, valid case.
+ (options, _) = self.parser.parse_args(['--csv-file-dir', 'some_dir'])
+ self.assertEqual('some_dir', options.csv_file_dir)
+
+ # missing option arg; should trigger parser error.
+ self.assertRaises(OptsError, self.parser.parse_args,
+ ['--csv-file-dir'])
+
if __name__== "__main__":
unittest.main()
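
Several test modules in this branch import the TestOptParser/OptsError pair from isc.testutils.parse_args so that option errors raise an exception the tests can assert on instead of exiting the process. The real helper lives elsewhere in the tree and may differ in detail; the core idea is roughly:

    # Sketch only, assuming nothing beyond optparse; not the actual
    # isc/testutils/parse_args.py implementation.
    from optparse import OptionParser

    class OptsError(Exception):
        """Raised instead of exiting when option parsing fails."""

    class TestOptParser(OptionParser):
        def error(self, msg):
            raise OptsError(msg)
        def exit(self, status=0, msg=None):
            raise OptsError(msg)

    # e.g. self.assertRaises(OptsError, parser.parse_args, ['--csv-file-dir'])
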
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 98f515b..41a6790 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -27,13 +27,22 @@ from optparse import OptionParser
isc.util.process.rename()
# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
+# from a directory relative to the value of that variable, or, if defined,
+# relative to the value of B10_FROM_SOURCE_LOCALSTATEDIR. Otherwise
+# we use the ones installed on the system.
+# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
+# tests where we want to use various types of configuration within the test
+# environment. (We may want to make it even more generic so that the path is
+# passed from the boss process)
if "B10_FROM_SOURCE" in os.environ:
- DATA_PATH = os.environ["B10_FROM_SOURCE"]
+ if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+ DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
+ else:
+ DATA_PATH = os.environ["B10_FROM_SOURCE"]
else:
PREFIX = "@prefix@"
DATA_PATH = "@localstatedir@/@PACKAGE@".replace("${prefix}", PREFIX)
+DEFAULT_CONFIG_FILE = "b10-config.db"
cm = None
@@ -41,10 +50,11 @@ def parse_options(args=sys.argv[1:], Parser=OptionParser):
parser = Parser()
parser.add_option("-p", "--data-path", dest="data_path",
help="Directory to search for configuration files " +
- "(default="+DATA_PATH+")", default=DATA_PATH)
- parser.add_option("-f", "--database-filename", dest="file_name",
+ "(default=" + DATA_PATH + ")", default=DATA_PATH)
+ parser.add_option("-c", "--config-filename", dest="file_name",
help="Configuration database filename " +
- "(default=b10-config.db)", default="b10-config.db")
+ "(default=" + DEFAULT_CONFIG_FILE + ")",
+ default=DEFAULT_CONFIG_FILE)
(options, args) = parser.parse_args(args)
if args:
parser.error("No non-option arguments allowed")
diff --git a/src/bin/cfgmgr/b10-cfgmgr.xml b/src/bin/cfgmgr/b10-cfgmgr.xml
index 0734271..81cc644 100644
--- a/src/bin/cfgmgr/b10-cfgmgr.xml
+++ b/src/bin/cfgmgr/b10-cfgmgr.xml
@@ -44,7 +44,7 @@
<refsynopsisdiv>
<cmdsynopsis>
<command>b10-cfgmgr</command>
- <arg><option>-f<replaceable>config-filename</replaceable></option></arg>
+ <arg><option>-c<replaceable>config-filename</replaceable></option></arg>
<arg><option>-p<replaceable>data_path</replaceable></option></arg>
</cmdsynopsis>
</refsynopsisdiv>
@@ -98,8 +98,8 @@
<variablelist>
<varlistentry>
<term>
- <option>-f</option><replaceable>config-filename</replaceable>,
- <option>--database-filename</option> <replaceable>config-filename</replaceable>
+ <option>-c</option><replaceable>config-filename</replaceable>,
+ <option>--config-filename</option> <replaceable>config-filename</replaceable>
</term>
<listitem>
<para>The configuration database filename to use. Can be either
diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
index cdc4306..4cc4ddf 100644
--- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
+++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
@@ -103,14 +103,14 @@ class TestParseArgs(unittest.TestCase):
b = __import__("b10-cfgmgr")
parsed = b.parse_options([], TestOptParser)
self.assertEqual(b.DATA_PATH, parsed.data_path)
- self.assertEqual("b10-config.db", parsed.file_name)
+ self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.file_name)
def test_wrong_args(self):
"""
Test it fails when we pass invalid option.
"""
b = __import__("b10-cfgmgr")
- self.assertRaises(OptsError, b.parse_options, (['--wrong-option']),
+ self.assertRaises(OptsError, b.parse_options, ['--wrong-option'],
TestOptParser)
def test_not_arg(self):
@@ -119,7 +119,7 @@ class TestParseArgs(unittest.TestCase):
(eg. without -- at the beginning).
"""
b = __import__("b10-cfgmgr")
- self.assertRaises(OptsError, b.parse_options, (['not-option']),
+ self.assertRaises(OptsError, b.parse_options, ['not-option'],
TestOptParser)
def test_datapath(self):
@@ -129,23 +129,29 @@ class TestParseArgs(unittest.TestCase):
b = __import__("b10-cfgmgr")
parsed = b.parse_options(['--data-path=/path'], TestOptParser)
self.assertEqual('/path', parsed.data_path)
- self.assertEqual("b10-config.db", parsed.file_name)
+ self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.file_name)
parsed = b.parse_options(['-p', '/path'], TestOptParser)
self.assertEqual('/path', parsed.data_path)
- self.assertEqual("b10-config.db", parsed.file_name)
+ self.assertEqual(b.DEFAULT_CONFIG_FILE, parsed.file_name)
+ self.assertRaises(OptsError, b.parse_options, ['-p'], TestOptParser)
+ self.assertRaises(OptsError, b.parse_options, ['--data-path'],
+ TestOptParser)
def test_db_filename(self):
"""
Test setting the configuration database file.
"""
b = __import__("b10-cfgmgr")
- parsed = b.parse_options(['--database-filename=filename'],
+ parsed = b.parse_options(['--config-filename=filename'],
TestOptParser)
self.assertEqual(b.DATA_PATH, parsed.data_path)
self.assertEqual("filename", parsed.file_name)
- parsed = b.parse_options(['-f', 'filename'], TestOptParser)
+ parsed = b.parse_options(['-c', 'filename'], TestOptParser)
self.assertEqual(b.DATA_PATH, parsed.data_path)
self.assertEqual("filename", parsed.file_name)
+ self.assertRaises(OptsError, b.parse_options, ['-c'], TestOptParser)
+ self.assertRaises(OptsError, b.parse_options, ['--config-filename'],
+ TestOptParser)
if __name__ == '__main__':
unittest.main()
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 3125e32..849092c 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -74,7 +74,7 @@ to listen on\&. The list items are the
\fIaddress\fR
string and
\fIport\fR
-number\&. The defaults are address ::1 port 5300 and address 127\&.0\&.0\&.1 port 5300\&.
+number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
.PP
\fIretries\fR
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index 0d395a7..bdf4f8a 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -141,8 +141,9 @@ once that is merged you can for instance do 'config add Resolver/forward_address
<command>b10-resolver</command> to listen on.
The list items are the <varname>address</varname> string
and <varname>port</varname> number.
- The defaults are address ::1 port 5300 and
- address 127.0.0.1 port 5300.
+ The defaults are address ::1 port 53 and
+ address 127.0.0.1 port 53.
+<!-- TODO: but defaults are not used, Trac #518 -->
</para>
<para>
diff --git a/src/bin/resolver/main.cc b/src/bin/resolver/main.cc
index 03f9ab7..d987c74 100644
--- a/src/bin/resolver/main.cc
+++ b/src/bin/resolver/main.cc
@@ -56,7 +56,6 @@ using namespace asiolink;
namespace {
-// Default port current 5300 for testing purposes
static const string PROGRAM = "Resolver";
IOService io_service;
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 4996b71..84df9d2 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -185,8 +185,8 @@ public:
// TODO: REMOVE, USE isc::resolve::MakeErrorMessage?
void
-makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
- const Rcode& rcode)
+makeErrorMessage(MessagePtr message, MessagePtr answer_message,
+ OutputBufferPtr buffer, const Rcode& rcode)
{
// extract the parameters that should be kept.
// XXX: with the current implementation, it's not easy to set EDNS0
@@ -197,6 +197,12 @@ makeErrorMessage(MessagePtr message, OutputBufferPtr buffer,
const Opcode& opcode = message->getOpcode();
vector<QuestionPtr> questions;
+ // answer_message is actually ignored right now,
+ // see the comment in #607
+ answer_message->setRcode(rcode);
+ answer_message->setOpcode(opcode);
+ answer_message->setQid(qid);
+
// If this is an error to a query or notify, we should also copy the
// question section.
if (opcode == Opcode::QUERY() || opcode == Opcode::NOTIFY()) {
@@ -385,12 +391,14 @@ Resolver::processMessage(const IOMessage& io_message,
} catch (const DNSProtocolError& error) {
dlog(string("returning ") + error.getRcode().toText() + ": " +
error.what());
- makeErrorMessage(query_message, buffer, error.getRcode());
+ makeErrorMessage(query_message, answer_message,
+ buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
dlog(string("returning SERVFAIL: ") + ex.what());
- makeErrorMessage(query_message, buffer, Rcode::SERVFAIL());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::SERVFAIL());
server->resume(true);
return;
} // other exceptions will be handled at a higher layer.
@@ -400,28 +408,34 @@ Resolver::processMessage(const IOMessage& io_message,
// Perform further protocol-level validation.
bool sendAnswer = true;
if (query_message->getOpcode() == Opcode::NOTIFY()) {
- makeErrorMessage(query_message, buffer, Rcode::NOTAUTH());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::NOTAUTH());
dlog("Notify arrived, but we are not authoritative");
} else if (query_message->getOpcode() != Opcode::QUERY()) {
dlog("Unsupported opcode (got: " + query_message->getOpcode().toText() +
", expected: " + Opcode::QUERY().toText());
- makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::NOTIMP());
} else if (query_message->getRRCount(Message::SECTION_QUESTION) != 1) {
dlog("The query contained " +
boost::lexical_cast<string>(query_message->getRRCount(
Message::SECTION_QUESTION) + " questions, exactly one expected"));
- makeErrorMessage(query_message, buffer, Rcode::FORMERR());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::FORMERR());
} else {
ConstQuestionPtr question = *query_message->beginQuestion();
const RRType &qtype = question->getType();
if (qtype == RRType::AXFR()) {
if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
- makeErrorMessage(query_message, buffer, Rcode::FORMERR());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::FORMERR());
} else {
- makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::NOTIMP());
}
} else if (qtype == RRType::IXFR()) {
- makeErrorMessage(query_message, buffer, Rcode::NOTIMP());
+ makeErrorMessage(query_message, answer_message,
+ buffer, Rcode::NOTIMP());
} else {
// The RecursiveQuery object will post the "resume" event to the
// DNSServer when an answer arrives, so we don't have to do it now.
diff --git a/src/bin/resolver/resolver.spec.pre.in b/src/bin/resolver/resolver.spec.pre.in
index bc598b0..9df1e75 100644
--- a/src/bin/resolver/resolver.spec.pre.in
+++ b/src/bin/resolver/resolver.spec.pre.in
@@ -86,11 +86,11 @@
"item_default": [
{
"address": "::1",
- "port": 5300
+ "port": 53
},
{
"address": "127.0.0.1",
- "port": 5300
+ "port": 53
}
],
"list_item_spec": {
@@ -109,7 +109,7 @@
"item_name": "port",
"item_type": "integer",
"item_optional": false,
- "item_default": 5300
+ "item_default": 53
}
]
}
diff --git a/src/bin/resolver/tests/resolver_unittest.cc b/src/bin/resolver/tests/resolver_unittest.cc
index a4f11f5..97edf12 100644
--- a/src/bin/resolver/tests/resolver_unittest.cc
+++ b/src/bin/resolver/tests/resolver_unittest.cc
@@ -96,6 +96,27 @@ TEST_F(ResolverTest, AXFRFail) {
QR_FLAG, 1, 0, 0, 0);
}
+TEST_F(ResolverTest, IXFRFail) {
+ UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+ Name("example.com"), RRClass::IN(),
+ RRType::IXFR());
+ createRequestPacket(request_message, IPPROTO_TCP);
+ // IXFR is not implemented and should always send NOTIMP.
+ server.processMessage(*io_message,
+ parse_message,
+ response_message,
+ response_obuffer,
+ &dnsserv);
+ EXPECT_TRUE(dnsserv.hasAnswer());
+ // The second check is what we will eventually need (with the values
+ // from the first one), but right now the first check reflects what
+ // is actually returned to the client.
+ headerCheck(*parse_message, default_qid, Rcode::NOTIMP(), opcode.getCode(),
+ QR_FLAG, 1, 0, 0, 0);
+ headerCheck(*response_message, default_qid, Rcode::NOTIMP(), opcode.getCode(),
+ 0, 0, 0, 0, 0);
+}
+
TEST_F(ResolverTest, notifyFail) {
// Notify should always return NOTAUTH
request_message.clear(Message::RENDER);
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index fd1288d..f420d4b 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -50,7 +50,11 @@ isc.util.process.rename()
if "B10_FROM_BUILD" in os.environ:
SPECFILE_PATH = os.environ["B10_FROM_BUILD"] + "/src/bin/xfrout"
AUTH_SPECFILE_PATH = os.environ["B10_FROM_BUILD"] + "/src/bin/auth"
- UNIX_SOCKET_FILE= os.environ["B10_FROM_BUILD"] + "/auth_xfrout_conn"
+ if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+ UNIX_SOCKET_FILE = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"] + \
+ "/auth_xfrout_conn"
+ else:
+ UNIX_SOCKET_FILE = os.environ["B10_FROM_BUILD"] + "/auth_xfrout_conn"
else:
PREFIX = "@prefix@"
DATAROOTDIR = "@datarootdir@"
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index 2c82b74..3e4dcd6 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -4,7 +4,7 @@ debug
missingInclude
// This is a template, and should be excluded from the check
unreadVariable:src/lib/dns/rdata/template.cc:59
-// These two trigger warnings due to the incomplete implementation. This is
+// These three trigger warnings due to the incomplete implementation. This is
// our problem, but we need to suppress the warnings for now.
functionConst:src/lib/cache/resolver_cache.h
functionConst:src/lib/cache/message_cache.h
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index b3968f0..b28b784 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -13,28 +13,34 @@ CLEANFILES = *.gcno *.gcda
# which would make the build fail with -Werror (our default setting).
lib_LTLIBRARIES = libasiolink.la
libasiolink_la_SOURCES = asiolink.h
+libasiolink_la_SOURCES += asiolink_utilities.h
+libasiolink_la_SOURCES += asiodef.cc asiodef.h
libasiolink_la_SOURCES += dns_answer.h
libasiolink_la_SOURCES += dns_lookup.h
libasiolink_la_SOURCES += dns_server.h
-libasiolink_la_SOURCES += dns_service.h dns_service.cc
+libasiolink_la_SOURCES += dns_service.cc dns_service.h
libasiolink_la_SOURCES += dummy_io_cb.h
-libasiolink_la_SOURCES += interval_timer.h interval_timer.cc
-libasiolink_la_SOURCES += io_address.h io_address.cc
+libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
+libasiolink_la_SOURCES += io_address.cc io_address.h
libasiolink_la_SOURCES += io_asio_socket.h
-libasiolink_la_SOURCES += io_endpoint.h io_endpoint.cc
+libasiolink_la_SOURCES += io_endpoint.cc io_endpoint.h
libasiolink_la_SOURCES += io_error.h
-libasiolink_la_SOURCES += io_fetch.h io_fetch.cc
+libasiolink_la_SOURCES += io_fetch.cc io_fetch.h
libasiolink_la_SOURCES += io_message.h
-libasiolink_la_SOURCES += io_service.h io_service.cc
-libasiolink_la_SOURCES += io_socket.h io_socket.cc
-libasiolink_la_SOURCES += recursive_query.h recursive_query.cc
+libasiolink_la_SOURCES += io_service.cc io_service.h
+libasiolink_la_SOURCES += io_socket.cc io_socket.h
+libasiolink_la_SOURCES += qid_gen.cc qid_gen.h
+libasiolink_la_SOURCES += recursive_query.cc recursive_query.h
libasiolink_la_SOURCES += simple_callback.h
libasiolink_la_SOURCES += tcp_endpoint.h
-libasiolink_la_SOURCES += tcp_server.h tcp_server.cc
+libasiolink_la_SOURCES += tcp_server.cc tcp_server.h
libasiolink_la_SOURCES += tcp_socket.h
libasiolink_la_SOURCES += udp_endpoint.h
-libasiolink_la_SOURCES += udp_server.h udp_server.cc
+libasiolink_la_SOURCES += udp_server.cc udp_server.h
libasiolink_la_SOURCES += udp_socket.h
+
+EXTRA_DIST = asiodef.msg
+
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
libasiolink_la_CXXFLAGS = $(AM_CXXFLAGS)
@@ -46,7 +52,8 @@ if USE_CLANGPP
libasiolink_la_CXXFLAGS += -Wno-error
endif
libasiolink_la_CPPFLAGS = $(AM_CPPFLAGS)
-libasiolink_la_LIBADD = $(top_builddir)/src/lib/log/liblog.la
+libasiolink_la_LIBADD =
libasiolink_la_LIBADD += $(top_builddir)/src/lib/resolve/libresolve.la
libasiolink_la_LIBADD += $(top_builddir)/src/lib/cache/libcache.la
libasiolink_la_LIBADD += $(top_builddir)/src/lib/nsas/libnsas.la
+libasiolink_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
diff --git a/src/lib/asiolink/asiodef.cc b/src/lib/asiolink/asiodef.cc
new file mode 100644
index 0000000..94c71b5
--- /dev/null
+++ b/src/lib/asiolink/asiodef.cc
@@ -0,0 +1,37 @@
+// File created from asiodef.msg on Mon Feb 28 17:15:30 2011
+
+#include <cstddef>
+#include <log/message_types.h>
+#include <log/message_initializer.h>
+
+namespace asiolink {
+
+extern const isc::log::MessageID ASIO_FETCHCOMP = "FETCHCOMP";
+extern const isc::log::MessageID ASIO_FETCHSTOP = "FETCHSTOP";
+extern const isc::log::MessageID ASIO_OPENSOCK = "OPENSOCK";
+extern const isc::log::MessageID ASIO_RECVSOCK = "RECVSOCK";
+extern const isc::log::MessageID ASIO_RECVTMO = "RECVTMO";
+extern const isc::log::MessageID ASIO_SENDSOCK = "SENDSOCK";
+extern const isc::log::MessageID ASIO_UNKORIGIN = "UNKORIGIN";
+extern const isc::log::MessageID ASIO_UNKRESULT = "UNKRESULT";
+
+} // namespace asiolink
+
+namespace {
+
+const char* values[] = {
+ "FETCHCOMP", "upstream fetch to %s(%d) has now completed",
+ "FETCHSTOP", "upstream fetch to %s(%d) has been stopped",
+ "OPENSOCK", "error %d opening %s socket to %s(%d)",
+ "RECVSOCK", "error %d reading %s data from %s(%d)",
+ "RECVTMO", "receive timeout while waiting for data from %s(%d)",
+ "SENDSOCK", "error %d sending data using %s to %s(%d)",
+ "UNKORIGIN", "unknown origin for ASIO error code %d (protocol: %s, address %s)",
+ "UNKRESULT", "unknown result (%d) when IOFetch::stop() was executed for I/O to %s(%d)",
+ NULL
+};
+
+const isc::log::MessageInitializer initializer(values);
+
+} // Anonymous namespace
+
diff --git a/src/lib/asiolink/asiodef.h b/src/lib/asiolink/asiodef.h
new file mode 100644
index 0000000..ba77817
--- /dev/null
+++ b/src/lib/asiolink/asiodef.h
@@ -0,0 +1,21 @@
+// File created from asiodef.msg on Mon Feb 28 17:15:30 2011
+
+#ifndef __ASIODEF_H
+#define __ASIODEF_H
+
+#include <log/message_types.h>
+
+namespace asiolink {
+
+extern const isc::log::MessageID ASIO_FETCHCOMP;
+extern const isc::log::MessageID ASIO_FETCHSTOP;
+extern const isc::log::MessageID ASIO_OPENSOCK;
+extern const isc::log::MessageID ASIO_RECVSOCK;
+extern const isc::log::MessageID ASIO_RECVTMO;
+extern const isc::log::MessageID ASIO_SENDSOCK;
+extern const isc::log::MessageID ASIO_UNKORIGIN;
+extern const isc::log::MessageID ASIO_UNKRESULT;
+
+} // namespace asiolink
+
+#endif // __ASIODEF_H
diff --git a/src/lib/asiolink/asiodef.msg b/src/lib/asiolink/asiodef.msg
new file mode 100644
index 0000000..2fcadd1
--- /dev/null
+++ b/src/lib/asiolink/asiodef.msg
@@ -0,0 +1,56 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+$PREFIX ASIO_
+$NAMESPACE asiolink
+
+FETCHCOMP upstream fetch to %s(%d) has now completed
++ A debug message; this records that the upstream fetch (a query made by the
++ resolver on behalf of its client) to the specified address has completed.
+
+FETCHSTOP upstream fetch to %s(%d) has been stopped
++ An external component has requested the halting of an upstream fetch. This
++ is an allowed operation, and the message should only appear if debug is
++ enabled.
+
+OPENSOCK error %d opening %s socket to %s(%d)
++ The asynchronous I/O code encountered an error when trying to open a socket
++ of the specified protocol in order to send a message to the target address.
++ The number of the system error that caused the problem is given in the
++ message.
+
+RECVSOCK error %d reading %s data from %s(%d)
++ The asynchronous I/O code encountered an error when trying to read data from
++ the specified address on the given protocol. The number of the system
++ error that caused the problem is given in the message.
+
+SENDSOCK error %d sending data using %s to %s(%d)
++ The asynchronous I/O code encountered an error when trying to send data to
++ the specified address on the given protocol. The number of the system
++ error that caused the problem is given in the message.
+
+RECVTMO receive timeout while waiting for data from %s(%d)
++ An upstream fetch from the specified address timed out. This may happen for
++ any number of reasons and is most probably a problem at the remote server
++ or a problem on the network. The message will only appear if debug is
++ enabled.
+
+UNKORIGIN unknown origin for ASIO error code %d (protocol: %s, address %s)
++ This message should not appear and indicates an internal error if it does.
++ Please enter a bug report.
+
+UNKRESULT unknown result (%d) when IOFetch::stop() was executed for I/O to %s(%d)
++ The termination method of the resolver's upstream fetch class was called with
++ an unknown result code (which is given in the message). This message should
++ not appear and may indicate an internal error. Please enter a bug report.
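
The .msg file above is the source for the generated asiodef.h/asiodef.cc shown earlier: each "ID format-string" line becomes a MessageID constant plus an entry in the values[] table, while the "+"-prefixed lines are extended descriptions that are not compiled in. A toy parser showing that relationship (this is not the project's real message compiler):

    def parse_msg(lines):
        # Collect "ID format-string" pairs, skipping comments, $ directives,
        # "+" description lines and blanks.
        messages = {}
        for line in lines:
            line = line.rstrip()
            if not line or line[0] in '#$+':
                continue
            msg_id, _, text = line.partition(' ')
            messages[msg_id] = text
        return messages

    # parse_msg(open('asiodef.msg'))['FETCHCOMP']
    #   == 'upstream fetch to %s(%d) has now completed'
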
diff --git a/src/lib/asiolink/asiolink.h b/src/lib/asiolink/asiolink.h
index 03951ae..251413e 100644
--- a/src/lib/asiolink/asiolink.h
+++ b/src/lib/asiolink/asiolink.h
@@ -85,7 +85,3 @@
/// http://think-async.com/Asio/asio-1.3.1/doc/asio/reference/asio_handler_allocate.html
#endif // __ASIOLINK_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/asiolink/asiolink_utilities.h b/src/lib/asiolink/asiolink_utilities.h
new file mode 100644
index 0000000..659e6a0
--- /dev/null
+++ b/src/lib/asiolink/asiolink_utilities.h
@@ -0,0 +1,61 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __ASIOLINK_UTILITIES_H
+#define __ASIOLINK_UTILITIES_H
+
+#include <cstddef>
+
+namespace asiolink {
+
+/// \brief Read Unsigned 16-Bit Integer from Buffer
+///
+/// This is essentially a copy of the isc::dns::InputBuffer::readUint16. It
+/// should really be moved into a separate library.
+///
+/// \param buffer Data buffer at least two bytes long of which the first two
+/// bytes are assumed to represent a 16-bit integer in network-byte
+/// order.
+///
+/// \return Value of 16-bit integer
+inline uint16_t
+readUint16(const void* buffer) {
+ const uint8_t* byte_buffer = static_cast<const uint8_t*>(buffer);
+
+ uint16_t result = (static_cast<uint16_t>(byte_buffer[0])) << 8;
+ result |= static_cast<uint16_t>(byte_buffer[1]);
+
+ return (result);
+}
+
+/// \brief Write Unsigned 16-Bit Integer to Buffer
+///
+/// This is essentially a copy of isc::dns::OutputBuffer::writeUint16. It
+/// should really be moved into a separate library.
+///
+/// \param value 16-bit value to convert
+/// \param buffer Data buffer at least two bytes long into which the 16-bit
+/// value is written in network-byte order.
+
+inline void
+writeUint16(uint16_t value, void* buffer) {
+ uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
+
+ byte_buffer[0] = static_cast<uint8_t>((value & 0xff00U) >> 8);
+ byte_buffer[1] = static_cast<uint8_t>(value & 0x00ffU);
+}
+
+} // namespace asiolink
+
+#endif // __ASIOLINK_UTILITIES_H
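As a usage sketch (assuming this header is on the include path), the two
helpers round-trip a 16-bit value through a two-byte, network-order buffer:

    #include <cassert>
    #include <stdint.h>
    #include <asiolink/asiolink_utilities.h>

    int main() {
        uint8_t wire[2];
        asiolink::writeUint16(0x1234, wire);   // wire[0] == 0x12, wire[1] == 0x34
        assert(asiolink::readUint16(wire) == 0x1234);
        return (0);
    }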
diff --git a/src/lib/asiolink/dns_server.h b/src/lib/asiolink/dns_server.h
index 352ea8e..f15f808 100644
--- a/src/lib/asiolink/dns_server.h
+++ b/src/lib/asiolink/dns_server.h
@@ -21,7 +21,7 @@ namespace asiolink {
/// \brief The \c DNSServer class is a wrapper (and base class) for
/// classes which provide DNS server functionality.
-///
+///
/// The classes derived from this one, \c TCPServer and \c UDPServer,
/// act as the interface layer between clients sending queries, and
/// functions defined elsewhere that provide answers to those queries.
@@ -42,10 +42,10 @@ namespace asiolink {
/// when "forking", and that instances will be posted as ASIO handler
/// objects, which are always copied.
///
-/// Because these objects are frequently copied, it is recommended
+/// Because these objects are frequently copied, it is recommended
/// that derived classes be kept small to reduce copy overhead.
class DNSServer {
-protected:
+protected:
///
/// \name Constructors and destructors
///
@@ -66,7 +66,7 @@ public:
/// the ones in the derived class. This makes it possible to pass
/// instances of derived classes as references to this base class
/// without losing access to derived class data.
- ///
+ ///
//@{
/// \brief The function operator
virtual void operator()(asio::error_code ec = asio::error_code(),
@@ -87,7 +87,7 @@ public:
/// \brief Indicate whether the server is able to send an answer
/// to a query.
- ///
+ ///
/// This is presently used only for testing purposes.
virtual bool hasAnswer() { return (self_->hasAnswer()); }
diff --git a/src/lib/asiolink/dns_service.h b/src/lib/asiolink/dns_service.h
index 84aa5fb..e1583c0 100644
--- a/src/lib/asiolink/dns_service.h
+++ b/src/lib/asiolink/dns_service.h
@@ -26,13 +26,13 @@ class DNSLookup;
class DNSAnswer;
class DNSServiceImpl;
+/// \brief Handle DNS Queries
///
/// DNSService is the service that handles DNS queries and answers with
/// a given IOService. This class is mainly intended to hold all the
/// logic that is shared between the authoritative and the recursive
/// server implementations. As such, it handles asio, including config
/// updates (through the 'Checkinprovider'), and listening sockets.
-///
class DNSService {
///
/// \name Constructors and Destructor
diff --git a/src/lib/asiolink/dummy_io_cb.h b/src/lib/asiolink/dummy_io_cb.h
index bde656c..0006b95 100644
--- a/src/lib/asiolink/dummy_io_cb.h
+++ b/src/lib/asiolink/dummy_io_cb.h
@@ -39,6 +39,14 @@ public:
/// \brief Asynchronous I/O callback method
///
/// \param error Unused
+ void operator()(asio::error_code)
+ {
+ // TODO: log an error if this method ever gets called.
+ }
+
+ /// \brief Asynchronous I/O callback method
+ ///
+ /// \param error Unused
/// \param length Unused
void operator()(asio::error_code, size_t)
{
diff --git a/src/lib/asiolink/internal/iofetch.h b/src/lib/asiolink/internal/iofetch.h
deleted file mode 100644
index d066c92..0000000
--- a/src/lib/asiolink/internal/iofetch.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef __IOFETCH_H
-#define __IOFETCH_H 1
-
-#include <config.h>
-
-#include <asio.hpp>
-#include <boost/shared_array.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include <dns/buffer.h>
-#include <dns/message.h>
-#include <dns/messagerenderer.h>
-
-#include <asiolink/asiolink.h>
-#include <asiolink/internal/coroutine.h>
-
-// This file contains TCP/UDP-specific implementations of generic classes
-// defined in asiolink.h. It is *not* intended to be part of the public
-// API.
-
-namespace asiolink {
-//
-// Asynchronous UDP/TCP coroutine for upstream fetches
-//
-//class IOFetch : public coroutine, public UdpFetch, public TcpFetch {
-class IOFetch : public coroutine {
-public:
- // TODO Maybe this should be more generic than just for IOFetch?
- ///
- /// \brief Result of the query
- ///
- /// This is related only to contacting the remote server. If the answer
- ///indicates error, it is still counted as SUCCESS here, if it comes back.
- ///
- enum Result {
- SUCCESS,
- TIME_OUT,
- STOPPED
- };
- /// Abstract callback for the IOFetch.
- class Callback {
- public:
- virtual ~Callback() {}
-
- /// This will be called when the IOFetch is completed
- virtual void operator()(Result result) = 0;
- };
- ///
- /// \brief Constructor.
- ///
- /// It creates the query.
- /// @param callback will be called when we terminate. It is your task to
- /// delete it if allocated on heap.
- ///@param timeout in ms.
- ///
- IOFetch(asio::io_service& io_service,
- const isc::dns::Question& q,
- const IOAddress& addr, uint16_t port,
- isc::dns::OutputBufferPtr buffer,
- Callback* callback, int timeout = -1,
- int protocol = IPPROTO_UDP);
- void operator()(asio::error_code ec = asio::error_code(),
- size_t length = 0);
- /// Terminate the query.
- void stop(Result reason = STOPPED);
-private:
- enum { MAX_LENGTH = 4096 };
-
- ///
- /// \short Private data
- ///
- /// They are not private because of stability of the
- /// interface (this is private class anyway), but because this class
- /// will be copyed often (it is used as a coroutine and passed as callback
- /// to many async_*() functions) and we want keep the same data. Some of
- /// the data is not copyable too.
- ///
- //struct IOFetchProtocol;
- //boost::shared_ptr<IOFetchProtocol> data_;
- //struct UdpData;
- //struct TcpData;
- boost::shared_ptr<UdpFetch> data_;
- boost::shared_ptr<TcpFetch> tcp_data_;
-};
-class UdpFetch : public IOFetch {
- public:
- struct UdpData;
- explicit UdpFetch(asio::io_service& io_service,
- const isc::dns::Question& q,
- const IOAddress& addr,
- uint16_t port,
- isc::dns::OutputBufferPtr buffer,
- IOFetch::Callback *callback,
- int timeout);
-};
-class TcpFetch : public IOFetch {
- public:
- struct TcpData;
- explicit TcpFetch(io_service& io_service, const Question& q,
- const IOAddress& addr, uint16_t port,
- OutputBufferPtr buffer, Callback *callback, int timeout);
-};
-
-}
-
-
-#endif // __IOFETCH_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/asiolink/interval_timer.h b/src/lib/asiolink/interval_timer.h
index d805cd7..6c43327 100644
--- a/src/lib/asiolink/interval_timer.h
+++ b/src/lib/asiolink/interval_timer.h
@@ -37,7 +37,7 @@ struct IntervalTimerImpl;
/// The function calls the call back function set by \c setup() and updates
/// the timer to expire in (now + interval) milliseconds.
/// The type of call back function is \c void(void).
-///
+///
/// The call back function will not be called if the instance of this class is
/// destroyed before the timer is expired.
///
diff --git a/src/lib/asiolink/io_address.h b/src/lib/asiolink/io_address.h
index 0d2787f..d759827 100644
--- a/src/lib/asiolink/io_address.h
+++ b/src/lib/asiolink/io_address.h
@@ -121,7 +121,3 @@ private:
} // asiolink
#endif // __IO_ADDRESS_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/asiolink/io_asio_socket.h b/src/lib/asiolink/io_asio_socket.h
index eae9b32..ac793a6 100644
--- a/src/lib/asiolink/io_asio_socket.h
+++ b/src/lib/asiolink/io_asio_socket.h
@@ -26,6 +26,8 @@
#include <exceptions/exceptions.h>
#include <coroutine.h>
+#include <dns/buffer.h>
+
#include <asiolink/io_error.h>
#include <asiolink/io_socket.h>
@@ -41,7 +43,24 @@ public:
IOError(file, line, what) {}
};
+/// \brief Error setting socket options
+///
+/// Thrown if attempt to change socket options fails.
+class SocketSetError : public IOError {
+public:
+ SocketSetError(const char* file, size_t line, const char* what) :
+ IOError(file, line, what) {}
+};
+/// \brief Buffer overflow
+///
+/// Thrown if an attempt is made to receive into an area beyond the end of
+/// the receive data buffer.
+class BufferOverflow : public IOError {
+public:
+ BufferOverflow(const char* file, size_t line, const char* what) :
+ IOError(file, line, what) {}
+};
/// Forward declaration of an IOEndpoint
class IOEndpoint;
@@ -91,24 +110,23 @@ public:
/// \brief Return the "native" representation of the socket.
///
- /// In practice, this is the file descriptor of the socket for
- /// UNIX-like systems so the current implementation simply uses
- /// \c int as the type of the return value.
- /// We may have to need revisit this decision later.
+ /// In practice, this is the file descriptor of the socket for UNIX-like
+ /// systems so the current implementation simply uses \c int as the type of
+ /// the return value. We may need to revisit this decision later.
///
- /// In general, the application should avoid using this method;
- /// it essentially discloses an implementation specific "handle" that
- /// can change the internal state of the socket (consider the
- /// application closes it, for example).
- /// But we sometimes need to perform very low-level operations that
- /// requires the native representation. Passing the file descriptor
- /// to a different process is one example.
- /// This method is provided as a necessary evil for such limited purposes.
+ /// In general, the application should avoid using this method; it
+ /// essentially discloses an implementation specific "handle" that can
+ /// change the internal state of the socket (consider what would happen if
+ /// the application closes it, for example). But we sometimes need to
+ /// perform very low-level operations that require the native
+ /// representation. Passing the file descriptor to a different process is
+ /// one example. This method is provided as a necessary evil for such
+ /// limited purposes.
///
/// This method never throws an exception.
///
/// \return The native representation of the socket. This is the socket
- /// file descriptor for UNIX-like systems.
+ /// file descriptor for UNIX-like systems.
virtual int getNative() const = 0;
/// \brief Return the transport protocol of the socket.
@@ -118,36 +136,50 @@ public:
///
/// This method never throws an exception.
///
- /// \return IPPROTO_UDP for UDP sockets
- /// \return IPPROTO_TCP for TCP sockets
+ /// \return \c IPPROTO_UDP for UDP sockets, \c IPPROTO_TCP for TCP sockets
virtual int getProtocol() const = 0;
- /// \brief Open AsioSocket
+ /// \brief Is Open() synchronous?
///
- /// Opens the socket for asynchronous I/O. On a UDP socket, this is merely
- /// an "open()" on the underlying socket (so completes immediately), but on
- /// a TCP socket it also connects to the remote end (which is done as an
- /// asynchronous operation).
+ /// On a TCP socket, an "open" operation is a call to the socket's "open()"
+ /// method followed by a connection to the remote system: it is an
+ /// asynchronous operation. On a UDP socket, it is just a call to "open()"
+ /// and completes synchronously.
///
/// For TCP, signalling of the completion of the operation is done by
/// calling the callback function in the normal way. This could be done
/// for UDP (by posting an event on the event queue); however, that will
- /// incur additional overhead in the most common case. Instead, the return
- /// value indicates whether the operation was asynchronous or not. If yes,
- /// (i.e. TCP) the callback has been posted to the event queue: if no (UDP),
- /// no callback has been posted (in which case it is up to the caller as to
- /// whether they want to manually post the callback themself.)
+ /// incur additional overhead in the most common case. So we give the
+ /// caller the choice of calling this open() method synchronously or
+ /// asynchronously.
+ ///
+ /// Owing to the way that the stackless coroutines are implemented, we need
+ /// to know _before_ executing the "open" function whether or not it is
+ /// asynchronous. So this method is called to provide that information.
+ ///
+ /// (The reason there is a need to know is because the call to open() passes
+ /// in the state of the coroutine at the time the call is made. On an
+ /// asynchronous I/O, we need to set the state to point to the statement
+ /// after the call to open() _before_ we pass the coroutine to the open()
+ /// call. Unfortunately, the macros that set the state of the coroutine
+ /// also yield control - which we don't want to do if the open is
+ /// synchronous. Hence we need to know before we make the call to open()
+ /// whether that call will complete asynchronously.)
+ virtual bool isOpenSynchronous() const = 0;
+
+ /// \brief Open AsioSocket
+ ///
+ /// Opens the socket for asynchronous I/O. The open will complete
+ /// synchronously on UDP or asynchronously on TCP (in which case a callback
+ /// will be queued).
///
/// \param endpoint Pointer to the endpoint object. This is ignored for
- /// a UDP socket (the target is specified in the send call), but should
- /// be of type TCPEndpoint for a TCP connection.
+ /// a UDP socket (the target is specified in the send call), but
+ /// should be of type TCPEndpoint for a TCP connection.
/// \param callback I/O Completion callback, called when the operation has
- /// completed, but only if the operation was asynchronous.
- ///
- /// \return true if an asynchronous operation was started and the caller
- /// should yield and wait for completion, false if the operation was
- /// completed synchronously and no callback was queued.
- virtual bool open(const IOEndpoint* endpoint, C& callback) = 0;
+ /// completed, but only if the operation was asynchronous. (It is
+ /// ignored on a UDP socket.)
+ virtual void open(const IOEndpoint* endpoint, C& callback) = 0;
/// \brief Send Asynchronously
///
@@ -160,44 +192,85 @@ public:
/// \param endpoint Target of the send
/// \param callback Callback object.
virtual void asyncSend(const void* data, size_t length,
- const IOEndpoint* endpoint, C& callback) = 0;
+ const IOEndpoint* endpoint, C& callback) = 0;
/// \brief Receive Asynchronously
///
- /// This correstponds to async_receive_from() for UDP sockets and
+ /// This corresponds to async_receive_from() for UDP sockets and
/// async_receive() for TCP. In both cases, an endpoint argument is
/// supplied to receive the source of the communication. For TCP it will
/// be filled in with details of the connection.
///
/// \param data Buffer to receive incoming message
/// \param length Length of the data buffer
- /// \param cumulative Amount of data that should already be in the buffer.
+ /// \param offset Offset into buffer where data is to be put. Although the
+ /// offset could be implied by adjusting "data" and "length"
+ /// appropriately, using this argument allows data to be specified as
+ /// "const void*" - the overhead of converting it to a pointer to a
+ /// set of bytes is hidden away here.
/// \param endpoint Source of the communication
/// \param callback Callback object
- virtual void asyncReceive(void* data, size_t length, size_t cumulative,
- IOEndpoint* endpoint, C& callback) = 0;
-
- /// \brief Checks if the data received is complete.
- ///
- /// This applies to TCP receives, where the data is a byte stream and a
- /// receive is not guaranteed to receive the entire message. DNS messages
- /// over TCP are prefixed by a two-byte count field. This method takes the
- /// amount received so far and the amount received in this I/O and checks
- /// if the message is complete, returning the appropriate indication. As
- /// a side-effect, it also updates the amount received.
- ///
- /// For a UDP receive, all the data is received in one I/O, so this is
- /// effectively a no-op (although it does update the amount received).
- ///
- /// \param data Data buffer containing data to date
- /// \param length Amount of data received in last asynchronous I/O
- /// \param cumulative On input, amount of data received before the last
- /// I/O. On output, the total amount of data received to date.
+ virtual void asyncReceive(void* data, size_t length, size_t offset,
+ IOEndpoint* endpoint, C& callback) = 0;
+
+ /// \brief Processes received data
+ ///
+ /// In the IOFetch code, data is received into a staging buffer before being
+ /// copied into the target buffer. (This is because (a) we don't know how
+ /// much data we will be receiving, so don't know how to size the output
+ /// buffer and (b) TCP data is preceded by a two-byte count field that needs
+ /// to be discarded before being returned to the user.)
+ ///
+ /// An additional consideration is that TCP data is not received in one
+ /// I/O - it may take a number of I/Os - each receiving any non-zero number
+ /// of bytes - to read the entire message.
+ ///
+ /// So the IOFetch code has to loop until it determines that all the data
+ /// has been read. This is where this method comes in. It has several
+ /// functions:
+ ///
+ /// - It checks if the received data is complete.
+ /// - If data is not complete, decides if the next set of data is to go into
+ /// the start of the staging buffer or at some offset into it. (This
+ /// simplifies the case we could have in a TCP receive where the two-byte
+ /// count field is received in one-byte chunks: we put off interpreting
+ /// the count until we have all of it. The alternative - copying the
+ /// data to the output buffer and interpreting the count from there -
+ /// would require moving the data in the output buffer by two bytes before
+ /// returning it to the caller.)
+ /// - Copies data from the staging buffer into the output buffer.
+ ///
+ /// This functionality mainly applies to TCP receives. For UDP, all the
+ /// data is received in one I/O, so this just copies the data into the
+ /// output buffer.
+ ///
+ /// \param staging Pointer to the start of the staging buffer.
+ /// \param length Amount of data in the staging buffer.
+ /// \param cumulative Amount of data received before the staging buffer is
+ /// processed (this includes the TCP count field if appropriate).
+ /// The value should be set to zero before the receive loop is
+ /// entered, and it will be updated by this method as required.
+ /// \param offset Offset into the staging buffer where the next read should
+ /// put the received data. It should be set to zero before the first
+ /// call and may be updated by this method.
+ /// \param expected Expected amount of data to be received. This is
+ /// really the TCP count field and is set to that value when enough
+ /// of a TCP message is received. It should be initialized to -1
+ /// before the first read is executed.
+ /// \param outbuff Output buffer. Data in the staging buffer may be copied
+ /// to this output buffer in the call.
///
/// \return true if the receive is complete, false if another receive is
- /// needed.
- virtual bool receiveComplete(void* data, size_t length,
- size_t& cumulative) = 0;
+ /// needed. This is always true for UDP, but for TCP involves
+ /// checking the amount of data received so far against the amount
+ /// expected (as indicated by the two-byte count field). If this
+ /// method returns false, another read should be queued and data
+ /// should be read into the staging buffer at offset given by the
+ /// "offset" parameter.
+ virtual bool processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff) = 0;
/// \brief Cancel I/O On AsioSocket
virtual void cancel() = 0;
@@ -244,6 +317,13 @@ public:
virtual int getProtocol() const { return (protocol_); }
+ /// \brief Is socket opening synchronous?
+ ///
+ /// \return true - it is for this class.
+ bool isOpenSynchronous() const {
+ return true;
+ }
+
/// \brief Open AsioSocket
///
/// A call that is a no-op on UDP sockets, this opens a connection to the
@@ -273,21 +353,31 @@ public:
///
/// \param data Unused
/// \param length Unused
- /// \param cumulative Unused
+ /// \param offset Unused
/// \param endpoint Unused
/// \param callback Unused
- virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) { }
+ virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) {
+ }
+
/// \brief Checks if the data received is complete.
///
- /// \param data Unused
+ /// \param staging Unused
/// \param length Unused
/// \param cumulative Unused
+ /// \param offset Unused.
+ /// \param expected Unused.
+ /// \param outbuff Unused.
///
/// \return Always true
- virtual bool receiveComplete(void*, size_t, size_t&) {
+ virtual bool processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff)
+ {
return (true);
}
+
/// \brief Cancel I/O On AsioSocket
///
/// Must be supplied as it is abstract in the base class.
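To make the processReceivedData() contract more concrete, here is a
simplified, self-contained sketch of the TCP bookkeeping it describes - not
the library implementation. A std::vector stands in for
isc::dns::OutputBuffer, "expected" is assumed to be initialised to zero
(rather than -1) before the first call, and the whole message is assumed to
fit into the staging buffer:

    #include <cstddef>
    #include <stdint.h>
    #include <vector>

    // Returns true once the complete DNS message (without the two-byte count
    // field) has been copied to 'out'.
    bool processTcpChunk(const uint8_t* staging, size_t length,
                         size_t& cumulative, size_t& offset,
                         size_t& expected, std::vector<uint8_t>& out)
    {
        cumulative += length;          // total received so far, count field included
        if (cumulative < 2) {
            offset = cumulative;       // still waiting for the full count field
            return (false);
        }
        if (expected == 0) {
            // Both count bytes now present: record the expected message length.
            expected = (static_cast<size_t>(staging[0]) << 8) | staging[1];
        }
        out.assign(staging + 2, staging + cumulative);  // body received so far
        offset = cumulative;           // next read appends after the current data
        return (cumulative - 2 >= expected);
    }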
diff --git a/src/lib/asiolink/io_endpoint.cc b/src/lib/asiolink/io_endpoint.cc
index bf79f61..97e9c91 100644
--- a/src/lib/asiolink/io_endpoint.cc
+++ b/src/lib/asiolink/io_endpoint.cc
@@ -22,6 +22,7 @@
#include <asiolink/io_address.h>
#include <asiolink/io_error.h>
+#include <asiolink/io_endpoint.h>
#include <asiolink/tcp_endpoint.h>
#include <asiolink/udp_endpoint.h>
diff --git a/src/lib/asiolink/io_endpoint.h b/src/lib/asiolink/io_endpoint.h
index 62b9e47..2ec4083 100644
--- a/src/lib/asiolink/io_endpoint.h
+++ b/src/lib/asiolink/io_endpoint.h
@@ -116,7 +116,3 @@ public:
} // asiolink
#endif // __IO_ENDPOINT_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/asiolink/io_fetch.cc b/src/lib/asiolink/io_fetch.cc
index d1f722c..3ff44c0 100644
--- a/src/lib/asiolink/io_fetch.cc
+++ b/src/lib/asiolink/io_fetch.cc
@@ -19,15 +19,30 @@
#include <netinet/in.h>
#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <dns/message.h>
#include <dns/messagerenderer.h>
#include <dns/opcode.h>
#include <dns/rcode.h>
-#include <log/dummylog.h>
+#include <log/logger.h>
+
+#include <asiolink/qid_gen.h>
#include <asio.hpp>
+#include <asio/deadline_timer.hpp>
+
+#include <asiolink/asiodef.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_asio_socket.h>
+#include <asiolink/io_endpoint.h>
#include <asiolink/io_fetch.h>
+#include <asiolink/io_service.h>
+#include <asiolink/tcp_endpoint.h>
+#include <asiolink/tcp_socket.h>
+#include <asiolink/udp_endpoint.h>
+#include <asiolink/udp_socket.h>
using namespace asio;
using namespace isc::dns;
@@ -36,23 +51,125 @@ using namespace std;
namespace asiolink {
+/// Use the ASIO logger
+
+isc::log::Logger logger("asiolink");
+
+/// \brief IOFetch Data
+///
+/// The data for IOFetch is held in a separate struct pointed to by a shared_ptr
+/// object. This is because the IOFetch object will be copied often (it is used
+/// as a coroutine and passed as a callback to many async_*() functions) and we
+/// want to keep the same data. Organising the data in this way keeps copying to
+/// a minimum.
+struct IOFetchData {
+
+ // The first two members are scoped pointers to a base class because what is
+ // actually instantiated depends on whether the fetch is over UDP or TCP,
+ // which is not known until construction of the IOFetch. Use of a scoped
+ // pointer here is merely to ensure deletion when the data object is deleted.
+ boost::scoped_ptr<IOAsioSocket<IOFetch> > socket;
+ ///< Socket to use for I/O
+ boost::scoped_ptr<IOEndpoint> remote; ///< Where the fetch was sent
+ isc::dns::Question question; ///< Question to be asked
+ isc::dns::OutputBufferPtr msgbuf; ///< Wire buffer for question
+ isc::dns::OutputBufferPtr received; ///< Received data put here
+ IOFetch::Callback* callback; ///< Called on I/O Completion
+ asio::deadline_timer timer; ///< Timer to measure timeouts
+ IOFetch::Protocol protocol; ///< Protocol being used
+ size_t cumulative; ///< Cumulative received amount
+ size_t expected; ///< Expected amount of data
+ size_t offset; ///< Offset to receive data
+ bool stopped; ///< Have we stopped running?
+ int timeout; ///< Timeout in ms
+
+ // In case we need to log an error, the origin of the last asynchronous
+ // I/O is recorded. To save time and simplify the code, this is recorded
+ // as the ID of the error message that would be generated if the I/O failed.
+ // This means that we must make sure that all possible "origins" take the
+ // same arguments in their message in the same order.
+ isc::log::MessageID origin; ///< Origin of last asynchronous I/O
+ uint8_t staging[IOFetch::STAGING_LENGTH];
+ ///< Temporary array for received data
+
+ /// \brief Constructor
+ ///
+ /// Just fills in the data members of the IOFetchData structure
+ ///
+ /// \param proto Either IOFetch::TCP or IOFetch::UDP.
+ /// \param service I/O Service object to handle the asynchronous
+ /// operations.
+ /// \param query DNS question to send to the upstream server.
+ /// \param address IP address of upstream server
+ /// \param port Port to use for the query
+ /// \param buff Output buffer into which the response (in wire format)
+ /// is written (if a response is received).
+ /// \param cb Callback object containing the callback to be called
+ /// when we terminate. The caller is responsible for managing this
+ /// object and deleting it if necessary.
+ /// \param wait Timeout for the fetch (in ms).
+ ///
+ /// TODO: May need to alter constructor (see comment 4 in Trac ticket #554)
+ IOFetchData(IOFetch::Protocol proto, IOService& service,
+ const isc::dns::Question& query, const IOAddress& address,
+ uint16_t port, isc::dns::OutputBufferPtr& buff, IOFetch::Callback* cb,
+ int wait)
+ :
+ socket((proto == IOFetch::UDP) ?
+ static_cast<IOAsioSocket<IOFetch>*>(
+ new UDPSocket<IOFetch>(service)) :
+ static_cast<IOAsioSocket<IOFetch>*>(
+ new TCPSocket<IOFetch>(service))
+ ),
+ remote((proto == IOFetch::UDP) ?
+ static_cast<IOEndpoint*>(new UDPEndpoint(address, port)) :
+ static_cast<IOEndpoint*>(new TCPEndpoint(address, port))
+ ),
+ question(query),
+ msgbuf(new isc::dns::OutputBuffer(512)),
+ received(buff),
+
+ callback(cb),
+ timer(service.get_io_service()),
+ protocol(proto),
+ cumulative(0),
+ expected(0),
+ offset(0),
+ stopped(false),
+ timeout(wait),
+ origin(ASIO_UNKORIGIN),
+ staging()
+ {}
+};
+
/// IOFetch Constructor - just initialize the private data
-IOFetch::IOFetch(int protocol, IOService& service,
+IOFetch::IOFetch(Protocol protocol, IOService& service,
const isc::dns::Question& question, const IOAddress& address, uint16_t port,
- isc::dns::OutputBufferPtr& buff, Callback* cb, int wait)
+ OutputBufferPtr& buff, Callback* cb, int wait)
:
- data_(new IOFetch::IOFetchData(protocol, service, question, address,
+ data_(new IOFetchData(protocol, service, question, address,
port, buff, cb, wait))
{
}
+// Return protocol in use.
+
+IOFetch::Protocol
+IOFetch::getProtocol() const {
+ return (data_->protocol);
+}
+
/// The function operator is implemented with the "stackless coroutine"
/// pattern; see internal/coroutine.h for details.
void
-IOFetch::operator()(error_code ec, size_t length) {
- if (ec || data_->stopped) {
+IOFetch::operator()(asio::error_code ec, size_t length) {
+
+ if (data_->stopped) {
+ return;
+ } else if (ec) {
+ logIOFailure(ec);
return;
}
@@ -63,26 +180,17 @@ IOFetch::operator()(error_code ec, size_t length) {
/// declarations.
{
Message msg(Message::RENDER);
-
- // TODO: replace with boost::random or some other suitable PRNG
- msg.setQid(0);
+ msg.setQid(QidGenerator::getInstance().generateQid());
msg.setOpcode(Opcode::QUERY());
msg.setRcode(Rcode::NOERROR());
msg.setHeaderFlag(Message::HEADERFLAG_RD);
msg.addQuestion(data_->question);
MessageRenderer renderer(*data_->msgbuf);
msg.toWire(renderer);
-
- // As this is a new fetch, clear the amount of data received
- data_->cumulative = 0;
-
- dlog("Sending " + msg.toText() + " to " +
- data_->remote->getAddress().toText());
}
-
- // If we timeout, we stop, which will shutdown everything and
- // cancel all other attempts to run inside the coroutine
+ // If we time out, we stop, which will cancel outstanding I/Os and
+ // shut down everything.
if (data_->timeout != -1) {
data_->timer.expires_from_now(boost::posix_time::milliseconds(
data_->timeout));
@@ -91,13 +199,17 @@ IOFetch::operator()(error_code ec, size_t length) {
}
// Open a connection to the target system. For speed, if the operation
- // was completed synchronously (i.e. UDP operation) we bypass the yield.
- if (data_->socket->open(data_->remote.get(), *this)) {
- CORO_YIELD;
+ // is synchronous (i.e. UDP operation) we bypass the yield.
+ data_->origin = ASIO_OPENSOCK;
+ if (data_->socket->isOpenSynchronous()) {
+ data_->socket->open(data_->remote.get(), *this);
+ } else {
+ CORO_YIELD data_->socket->open(data_->remote.get(), *this);
}
- // Begin an asynchronous send, and then yield. When the send completes
- // send completes, we will resume immediately after this point.
+ // Begin an asynchronous send, and then yield. When the send completes,
+ // we will resume immediately after this point.
+ data_->origin = ASIO_SENDSOCK;
CORO_YIELD data_->socket->asyncSend(data_->msgbuf->getData(),
data_->msgbuf->getLength(), data_->remote.get(), *this);
@@ -108,24 +220,33 @@ IOFetch::operator()(error_code ec, size_t length) {
// we need to yield ... and we *really* don't want to set up another
// coroutine within that method.) So after each receive (and yield),
// we check if the operation is complete and if not, loop to read again.
+ //
+ // Another concession to TCP is that the amount of data is contained in the
+ // first two bytes. This leads to two problems:
+ //
+ // a) We don't want those bytes in the return buffer.
+ // b) They may not both arrive in the first I/O.
+ //
+ // So... we need to loop until we have at least two bytes, then store
+ // the expected amount of data. Then we need to loop until we have
+ // received all the data before copying it back to the user's buffer.
+ // And we want to minimise the amount of copying...
+
+ data_->origin = ASIO_RECVSOCK;
+ data_->cumulative = 0; // No data yet received
+ data_->offset = 0; // First data into start of buffer
do {
- CORO_YIELD data_->socket->asyncReceive(data_->data.get(),
- static_cast<size_t>(MAX_LENGTH), data_->cumulative,
- data_->remote.get(), *this);
- } while (!data_->socket->receiveComplete(data_->data.get(), length,
- data_->cumulative));
-
- // The message is not rendered yet, so we can't print it easily
- dlog("Received response from " + data_->remote->getAddress().toText());
-
- /// Copy the answer into the response buffer. (TODO: If the
- /// OutputBuffer object were made to meet the requirements of
- /// a MutableBufferSequence, then it could be written to directly
- /// by async_receive_from() and this additional copy step would
- /// be unnecessary.)
- data_->buffer->writeData(data_->data.get(), length);
-
- // Finished with this socket, so close it.
+ CORO_YIELD data_->socket->asyncReceive(data_->staging,
+ static_cast<size_t>(STAGING_LENGTH),
+ data_->offset,
+ data_->remote.get(), *this);
+ } while (!data_->socket->processReceivedData(data_->staging, length,
+ data_->cumulative, data_->offset,
+ data_->expected, data_->received));
+
+ // Finished with this socket, so close it. Closing will not generate an
+ // I/O error, but we reset the origin to unknown in case that changes.
+ data_->origin = ASIO_UNKORIGIN;
data_->socket->close();
/// We are done
@@ -137,9 +258,8 @@ IOFetch::operator()(error_code ec, size_t length) {
// query finishes or when the timer times out. Either way, it sets the
// "stopped_" flag and cancels anything that is in progress.
//
-// As the function may be entered multiple times as things wind down, the
-// stopped_ flag checks if stop() has already been called. If it has,
-// subsequent calls are no-ops.
+// As the function may be entered multiple times as things wind down, it checks
+// if the stopped_ flag is already set. If it is, the call is a no-op.
void
IOFetch::stop(Result result) {
@@ -156,20 +276,46 @@ IOFetch::stop(Result result) {
// variable should be done inside a mutex (and the stopped_ variable
// declared as "volatile").
//
+ // The numeric arguments indicate the debug level, with the lower
+ // numbers indicating the most important information. The relative
+ // values are somewhat arbitrary.
+ //
+ // Although Logger::debug checks the debug flag internally, doing it
+ // below before calling Logger::debug avoids the overhead of a string
+ // conversion in the common case when debug is not enabled.
+ //
// TODO: Update testing of stopped_ if threads are used.
data_->stopped = true;
-
switch (result) {
case TIME_OUT:
- dlog("Query timed out");
+ if (logger.isDebugEnabled(1)) {
+ logger.debug(20, ASIO_RECVTMO,
+ data_->remote->getAddress().toText().c_str(),
+ static_cast<int>(data_->remote->getPort()));
+ }
+ break;
+
+ case SUCCESS:
+ if (logger.isDebugEnabled(50)) {
+ logger.debug(30, ASIO_FETCHCOMP,
+ data_->remote->getAddress().toText().c_str(),
+ static_cast<int>(data_->remote->getPort()));
+ }
break;
case STOPPED:
- dlog("Query stopped");
+ // Fetch has been stopped for some other reason. This is
+ // allowed but as it is unusual it is logged, but with a lower
+ // debug level than a timeout (which is totally normal).
+ logger.debug(1, ASIO_FETCHSTOP,
+ data_->remote->getAddress().toText().c_str(),
+ static_cast<int>(data_->remote->getPort()));
break;
default:
- ;
+ logger.error(ASIO_UNKRESULT, static_cast<int>(result),
+ data_->remote->getAddress().toText().c_str(),
+ static_cast<int>(data_->remote->getPort()));
}
// Stop requested, cancel any I/Os on the socket and shut it down,
@@ -183,10 +329,26 @@ IOFetch::stop(Result result) {
if (data_->callback) {
(*(data_->callback))(result);
}
+ }
+}
- // Mark that stop() has now been called.
+// Log an error - called on I/O failure
- }
+void IOFetch::logIOFailure(asio::error_code ec) {
+
+ // Should only get here with a known error code.
+ assert((data_->origin == ASIO_OPENSOCK) ||
+ (data_->origin == ASIO_SENDSOCK) ||
+ (data_->origin == ASIO_RECVSOCK) ||
+ (data_->origin == ASIO_UNKORIGIN));
+
+ static const char* PROTOCOL[2] = {"TCP", "UDP"};
+ logger.error(data_->origin,
+ ec.value(),
+ ((data_->remote->getProtocol() == IPPROTO_TCP) ?
+ PROTOCOL[0] : PROTOCOL[1]),
+ data_->remote->getAddress().toText().c_str(),
+ static_cast<int>(data_->remote->getPort()));
}
} // namespace asiolink
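As a usage sketch of the refactored class (the callback class, server
address, port and timeout below are placeholders; the pattern mirrors
RunningQuery::send() in recursive_query.cc further down):

    #include <asiolink/io_address.h>
    #include <asiolink/io_fetch.h>
    #include <asiolink/io_service.h>
    #include <dns/buffer.h>
    #include <dns/question.h>

    // Example callback - invoked once with SUCCESS, TIME_OUT or STOPPED.
    class ExampleCallback : public asiolink::IOFetch::Callback {
    public:
        virtual void operator()(asiolink::IOFetch::Result result) {
            // handle the result here
        }
    };

    void startFetch(asiolink::IOService& io, const isc::dns::Question& question) {
        isc::dns::OutputBufferPtr buffer(new isc::dns::OutputBuffer(512));
        static ExampleCallback callback;    // caller owns the callback object
        asiolink::IOFetch fetch(asiolink::IOFetch::UDP, io, question,
                                asiolink::IOAddress("192.0.2.1"), 53,
                                buffer, &callback, 2000 /* ms timeout */);
        io.get_io_service().post(fetch);    // start the coroutine
    }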
diff --git a/src/lib/asiolink/io_fetch.h b/src/lib/asiolink/io_fetch.h
index 8158c6c..0723777 100644
--- a/src/lib/asiolink/io_fetch.h
+++ b/src/lib/asiolink/io_fetch.h
@@ -17,31 +17,23 @@
#include <config.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-#include <unistd.h> // for some IPC/network system calls
-
#include <boost/shared_array.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
-#include <asio/deadline_timer.hpp>
#include <coroutine.h>
+#include <asio/error_code.hpp>
+
#include <dns/buffer.h>
#include <dns/question.h>
-#include <asiolink/io_asio_socket.h>
-#include <asiolink/io_endpoint.h>
-#include <asiolink/io_service.h>
-#include <asiolink/tcp_socket.h>
-#include <asiolink/tcp_endpoint.h>
-#include <asiolink/udp_socket.h>
-#include <asiolink/udp_endpoint.h>
-
-
namespace asiolink {
+// Forward declarations
+class IOAddress;
+class IOFetchData;
+class IOService;
/// \brief Upstream Fetch Processing
///
@@ -51,6 +43,23 @@ namespace asiolink {
class IOFetch : public coroutine {
public:
+ /// \brief Protocol to use on the fetch
+ enum Protocol {
+ UDP = 0,
+ TCP = 1
+ };
+
+ /// \brief Origin of Asynchronous I/O Call
+ ///
+ /// Indicates what initiated an asynchronous I/O call and is used in deciding
+ /// what error message to output if the I/O fails.
+ enum Origin {
+ NONE = 0, ///< No asynchronous call outstanding
+ OPEN = 1,
+ SEND = 2,
+ RECEIVE = 3,
+ CLOSE = 4
+ };
/// \brief Result of Upstream Fetch
///
@@ -59,9 +68,9 @@ public:
/// even if the contents of the packet indicate that some error occurred.
enum Result {
SUCCESS = 0, ///< Success, fetch completed
- TIME_OUT, ///< Failure, fetch timed out
- STOPPED, ///< Control code, fetch has been stopped
- NOTSET ///< For testing, indicates value not set
+ TIME_OUT = 1, ///< Failure, fetch timed out
+ STOPPED = 2, ///< Control code, fetch has been stopped
+ NOTSET = 3 ///< For testing, indicates value not set
};
// The next enum is a "trick" to allow constants to be defined in a class
@@ -69,7 +78,7 @@ public:
/// \brief Integer Constants
enum {
- MAX_LENGTH = 4096 ///< Maximum size of receive buffer
+ STAGING_LENGTH = 8192 ///< Size of staging buffer
};
/// \brief I/O Fetch Callback
@@ -95,82 +104,12 @@ public:
virtual ~Callback()
{}
- /// \brief Callback method called when the fetch completes
- ///
- /// \brief result Result of the fetch
- virtual void operator()(Result result) = 0;
- };
-
- /// \brief IOFetch Data
- ///
- /// The data for IOFetch is held in a separate struct pointed to by a
- /// shared_ptr object. This is because the IOFetch object will be copied
- /// often (it is used as a coroutine and passed as callback to many
- /// async_*() functions) and we want keep the same data). Organising the
- /// data in this way keeps copying to a minimum.
- struct IOFetchData {
-
- // The next two members are shared pointers to a base class because what
- // is actually instantiated depends on whether the fetch is over UDP or
- // TCP, which is not known until construction of the IOFetch. Use of
- // a shared pointer here is merely to ensure deletion when the data
- // object is deleted.
- boost::shared_ptr<IOAsioSocket<IOFetch> > socket;
- ///< Socket to use for I/O
- boost::shared_ptr<IOEndpoint> remote; ///< Where the fetch was sent
- isc::dns::Question question; ///< Question to be asked
- isc::dns::OutputBufferPtr msgbuf; ///< Wire buffer for question
- isc::dns::OutputBufferPtr buffer; ///< Received data held here
- boost::shared_array<char> data; ///< Temporary array for data
- IOFetch::Callback* callback; ///< Called on I/O Completion
- size_t cumulative; ///< Cumulative received amount
- bool stopped; ///< Have we stopped running?
- asio::deadline_timer timer; ///< Timer to measure timeouts
- int timeout; ///< Timeout in ms
-
- /// \brief Constructor
- ///
- /// Just fills in the data members of the IOFetchData structure
+ /// \brief Callback method
///
- /// \param protocol either IPPROTO_UDP or IPPROTO_TCP
- /// \param service I/O Service object to handle the asynchronous
- /// operations.
- /// \param query DNS question to send to the upstream server.
- /// \param address IP address of upstream server
- /// \param port Port to use for the query
- /// \param buff Output buffer into which the response (in wire format)
- /// is written (if a response is received).
- /// \param cb Callback object containing the callback to be called
- /// when we terminate. The caller is responsible for managing this
- /// object and deleting it if necessary.
- /// \param wait Timeout for the fetch (in ms).
+ /// This is the method called when the fetch completes.
///
- /// TODO: May need to alter constructor (see comment 4 in Trac ticket #554)
- IOFetchData(int protocol, IOService& service,
- const isc::dns::Question& query, const IOAddress& address,
- uint16_t port, isc::dns::OutputBufferPtr& buff, Callback* cb,
- int wait)
- :
- socket((protocol == IPPROTO_UDP) ?
- static_cast<IOAsioSocket<IOFetch>*>(
- new UDPSocket<IOFetch>(service)) :
- static_cast<IOAsioSocket<IOFetch>*>(
- new TCPSocket<IOFetch>(service))
- ),
- remote((protocol == IPPROTO_UDP) ?
- static_cast<IOEndpoint*>(new UDPEndpoint(address, port)) :
- static_cast<IOEndpoint*>(new TCPEndpoint(address, port))
- ),
- question(query),
- msgbuf(new isc::dns::OutputBuffer(512)),
- buffer(buff),
- data(new char[IOFetch::MAX_LENGTH]),
- callback(cb),
- cumulative(0),
- stopped(false),
- timer(service.get_io_service()),
- timeout(wait)
- {}
+ /// \param result Result of the fetch
+ virtual void operator()(Result result) = 0;
};
/// \brief Constructor.
@@ -179,7 +118,7 @@ public:
///
/// TODO: Need to randomise the source port
///
- /// \param protocol Fetch protocol, either IPPROTO_UDP or IPPROTO_TCP
+ /// \param protocol Fetch protocol, either IOFetch::TCP or IOFetch::UDP
/// \param service I/O Service object to handle the asynchronous
/// operations.
/// \param question DNS question to send to the upstream server.
@@ -193,11 +132,16 @@ public:
/// (default = 53)
/// \param wait Timeout for the fetch (in ms). The default value of
/// -1 indicates no timeout.
- IOFetch(int protocol, IOService& service,
+ IOFetch(Protocol protocol, IOService& service,
const isc::dns::Question& question, const IOAddress& address,
uint16_t port, isc::dns::OutputBufferPtr& buff, Callback* cb,
int wait = -1);
-
+
+ /// \brief Return Current Protocol
+ ///
+ /// \return Protocol associated with this IOFetch object.
+ Protocol getProtocol() const;
+
/// \brief Coroutine entry point
///
/// The operator() method is the method in which the coroutine code enters
@@ -205,8 +149,7 @@ public:
///
/// \param ec Error code, the result of the last asynchronous I/O operation.
/// \param length Amount of data received on the last asynchronous read
- void operator()(asio::error_code ec = asio::error_code(),
- size_t length = 0);
+ void operator()(asio::error_code ec = asio::error_code(), size_t length = 0);
/// \brief Terminate query
///
@@ -217,6 +160,16 @@ public:
void stop(Result reason = STOPPED);
private:
+ /// \brief Log I/O Failure
+ ///
+ /// Records an I/O failure to the log file
+ ///
+ /// \param ec ASIO error code
+ void logIOFailure(asio::error_code ec);
+
+ // Member variables. All data is in a structure pointed to by a shared
+ // pointer. The IOFetch object is copied a number of times during its
+ // life, and only requiring a pointer to be copied reduces overhead.
boost::shared_ptr<IOFetchData> data_; ///< Private data
};
diff --git a/src/lib/asiolink/io_message.h b/src/lib/asiolink/io_message.h
index 532f449..e857bd9 100644
--- a/src/lib/asiolink/io_message.h
+++ b/src/lib/asiolink/io_message.h
@@ -98,7 +98,3 @@ private:
} // asiolink
#endif // __IO_MESSAGE_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/asiolink/qid_gen.cc b/src/lib/asiolink/qid_gen.cc
new file mode 100644
index 0000000..4063b39
--- /dev/null
+++ b/src/lib/asiolink/qid_gen.cc
@@ -0,0 +1,54 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// qid_gen defines a generator for query id's
+//
+// We probably want to merge this with the weighted random in the nsas
+// (and other parts where we need randomness, perhaps another thing
+// for a general libutil?)
+
+#include <asiolink/qid_gen.h>
+
+#include <sys/time.h>
+
+namespace {
+ asiolink::QidGenerator qid_generator_instance;
+}
+
+namespace asiolink {
+
+QidGenerator&
+QidGenerator::getInstance() {
+ return (qid_generator_instance);
+}
+
+QidGenerator::QidGenerator() : dist_(0, 65535),
+ vgen_(generator_, dist_)
+{
+ seed();
+}
+
+void
+QidGenerator::seed() {
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ generator_.seed((tv.tv_sec * 1000000) + tv.tv_usec);
+}
+
+isc::dns::qid_t
+QidGenerator::generateQid() {
+ return (vgen_());
+}
+
+} // namespace asiolink
diff --git a/src/lib/asiolink/qid_gen.h b/src/lib/asiolink/qid_gen.h
new file mode 100644
index 0000000..a5caa17
--- /dev/null
+++ b/src/lib/asiolink/qid_gen.h
@@ -0,0 +1,85 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// qid_gen defines a generator for query id's
+//
+// We probably want to merge this with the weighted random in the nsas
+// (and other parts where we need randomness, perhaps another thing
+// for a general libutil?)
+
+#ifndef __QID_GEN_H
+#define __QID_GEN_H
+
+#include <dns/message.h>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+namespace asiolink {
+
+/// This class generates Qids for outgoing queries
+///
+/// It is implemented as a singleton; the public way to access it
+/// is to call getInstance().generateQid().
+///
+/// The generator is automatically seeded with the current time when it is
+/// first created.
+class QidGenerator {
+public:
+ /// \brief Returns the singleton instance of the QidGenerator
+ ///
+ /// Returns a reference to the singleton instance of the generator
+ static QidGenerator& getInstance();
+
+ /// \brief Default constructor
+ ///
+ /// It is recommended that getInstance is used rather than creating
+ /// separate instances of this class.
+ ///
+ /// The constructor automatically seeds the generator with the
+ /// current time.
+ QidGenerator();
+
+ /// Generate a Qid
+ ///
+ /// \return A random Qid
+ isc::dns::qid_t generateQid();
+
+ /// \brief Seeds the QidGenerator (based on the current time)
+ ///
+ /// This is automatically called by the constructor
+ void seed();
+
+private:
+ // "Mersenne Twister: A 623-dimensionally equidistributed
+ // uniform pseudo-random number generator", Makoto Matsumoto and
+ // Takuji Nishimura, ACM Transactions on Modeling and Computer
+ // Simulation: Special Issue on Uniform Random Number Generation,
+ // Vol. 8, No. 1, January 1998, pp. 3-30.
+ //
+ // mt19937 is an implementation of one of the pseudo random
+ // generators described in this paper.
+ boost::mt19937 generator_;
+
+ // For qid's we want a uniform distribution
+ boost::uniform_int<> dist_;
+
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > vgen_;
+};
+
+
+} // namespace asiolink
+
+#endif // __QID_GEN_H
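A minimal usage sketch (this mirrors the msg.setQid() call added to
io_fetch.cc above):

    #include <asiolink/qid_gen.h>

    // Pick a fresh random QID for an outgoing query.
    isc::dns::qid_t newQueryId() {
        return (asiolink::QidGenerator::getInstance().generateQid());
    }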
diff --git a/src/lib/asiolink/recursive_query.cc b/src/lib/asiolink/recursive_query.cc
index 0bdf24e..406b176 100644
--- a/src/lib/asiolink/recursive_query.cc
+++ b/src/lib/asiolink/recursive_query.cc
@@ -55,10 +55,22 @@ RecursiveQuery::RecursiveQuery(DNSService& dns_service,
unsigned retries) :
dns_service_(dns_service), upstream_(new AddressVector(upstream)),
upstream_root_(new AddressVector(upstream_root)),
+ test_server_("", 0),
query_timeout_(query_timeout), client_timeout_(client_timeout),
lookup_timeout_(lookup_timeout), retries_(retries)
{}
+// Set the test server - only used for unit testing.
+
+void
+RecursiveQuery::setTestServer(const std::string& address, uint16_t port) {
+ dlog("Setting test server to " + address + "(" +
+ boost::lexical_cast<std::string>(port) + ")");
+ test_server_.first = address;
+ test_server_.second = port;
+}
+
+
namespace {
typedef std::pair<std::string, uint16_t> addr_t;
@@ -88,6 +100,10 @@ private:
// root servers...just copied over to the zone_servers_
boost::shared_ptr<AddressVector> upstream_root_;
+ // Test server - only used for testing. This takes precedence over all
+ // other servers if the port is non-zero.
+ std::pair<std::string, uint16_t> test_server_;
+
// Buffer to store the result.
OutputBufferPtr buffer_;
@@ -95,6 +111,12 @@ private:
//shared_ptr<DNSServer> server_;
isc::resolve::ResolverInterface::CallbackPtr resolvercallback_;
+ // Protocol used for the last query. This is set to IOFetch::UDP when a
+ // new upstream query is initiated, and changed to IOFetch::TCP if a
+ // packet is returned with the TC bit set. It is stored here to detect the
+ // case of a TCP packet being returned with the TC bit set.
+ IOFetch::Protocol protocol_;
+
// To prevent both unreasonably long cname chains and cname loops,
// we simply keep a counter of the number of CNAMEs we have
// followed so far (and error if it exceeds RESOLVER_MAX_CNAME_CHAIN
@@ -155,15 +177,27 @@ private:
}
// (re)send the query to the server.
- void send() {
+ //
+ // \param protocol Protocol to use for the fetch (default is UDP)
+ void send(IOFetch::Protocol protocol = IOFetch::UDP) {
const int uc = upstream_->size();
const int zs = zone_servers_.size();
+ protocol_ = protocol; // Store protocol being used for this
buffer_->clear();
- if (uc > 0) {
+ if (test_server_.second != 0) {
+ dlog("Sending upstream query (" + question_.toText() +
+ ") to test server at " + test_server_.first);
+ IOFetch query(protocol, io_, question_,
+ test_server_.first,
+ test_server_.second, buffer_, this,
+ query_timeout_);
+ ++queries_out_;
+ io_.get_io_service().post(query);
+ } else if (uc > 0) {
int serverIndex = rand() % uc;
dlog("Sending upstream query (" + question_.toText() +
") to " + upstream_->at(serverIndex).first);
- IOFetch query(IPPROTO_UDP, io_, question_,
+ IOFetch query(protocol, io_, question_,
upstream_->at(serverIndex).first,
upstream_->at(serverIndex).second, buffer_, this,
query_timeout_);
@@ -173,7 +207,7 @@ private:
int serverIndex = rand() % zs;
dlog("Sending query to zone server (" + question_.toText() +
") to " + zone_servers_.at(serverIndex).first);
- IOFetch query(IPPROTO_UDP, io_, question_,
+ IOFetch query(protocol, io_, question_,
zone_servers_.at(serverIndex).first,
zone_servers_.at(serverIndex).second, buffer_, this,
query_timeout_);
@@ -203,7 +237,7 @@ private:
isc::resolve::ResponseClassifier::Category category =
isc::resolve::ResponseClassifier::classify(
- question_, incoming, cname_target, cname_count_, true);
+ question_, incoming, cname_target, cname_count_);
bool found_ns_address = false;
@@ -291,6 +325,18 @@ private:
return true;
}
break;
+ case isc::resolve::ResponseClassifier::TRUNCATED:
+ // Truncated packet. If the protocol we used for the last one is
+ // UDP, re-query using TCP. Otherwise regard it as an error.
+ if (protocol_ == IOFetch::UDP) {
+ dlog("Response truncated, re-querying over TCP");
+ send(IOFetch::TCP);
+ return false;
+ }
+ // Was a TCP query so we have received a packet over TCP with the TC
+ // bit set: drop through to common error processing.
+ // TODO: Can we use what we have received instead of discarding it?
+
case isc::resolve::ResponseClassifier::EMPTY:
case isc::resolve::ResponseClassifier::EXTRADATA:
case isc::resolve::ResponseClassifier::INVNAMCLASS:
@@ -302,7 +348,7 @@ private:
case isc::resolve::ResponseClassifier::NOTSINGLE:
case isc::resolve::ResponseClassifier::OPCODE:
case isc::resolve::ResponseClassifier::RCODE:
- case isc::resolve::ResponseClassifier::TRUNCATED:
+
// Should we try a different server rather than SERVFAIL?
isc::resolve::makeErrorMessage(answer_message_,
Rcode::SERVFAIL());
@@ -320,6 +366,7 @@ public:
MessagePtr answer_message,
boost::shared_ptr<AddressVector> upstream,
boost::shared_ptr<AddressVector> upstream_root,
+ std::pair<std::string, uint16_t>& test_server,
OutputBufferPtr buffer,
isc::resolve::ResolverInterface::CallbackPtr cb,
int query_timeout, int client_timeout, int lookup_timeout,
@@ -330,8 +377,10 @@ public:
answer_message_(answer_message),
upstream_(upstream),
upstream_root_(upstream_root),
+ test_server_(test_server),
buffer_(buffer),
resolvercallback_(cb),
+ protocol_(IOFetch::UDP),
cname_count_(0),
query_timeout_(query_timeout),
retries_(retries),
@@ -441,7 +490,6 @@ public:
// This function is used as callback from DNSQuery.
virtual void operator()(IOFetch::Result result) {
- // XXX is this the place for TCP retry?
--queries_out_;
if (!done_ && result != IOFetch::TIME_OUT) {
// we got an answer
@@ -496,7 +544,8 @@ RecursiveQuery::resolve(const QuestionPtr& question,
dlog("Message not found in cache, starting recursive query");
// It will delete itself when it is done
new RunningQuery(io, *question, answer_message, upstream_,
- upstream_root_, buffer, callback, query_timeout_,
+ upstream_root_, test_server_,
+ buffer, callback, query_timeout_,
client_timeout_, lookup_timeout_, retries_,
cache_);
}
@@ -533,8 +582,9 @@ RecursiveQuery::resolve(const Question& question,
dlog("Message not found in cache, starting recursive query");
// It will delete itself when it is done
new RunningQuery(io, question, answer_message, upstream_, upstream_root_,
- buffer, crs, query_timeout_, client_timeout_,
- lookup_timeout_, retries_, cache_);
+ test_server_,
+ buffer, crs, query_timeout_, client_timeout_,
+ lookup_timeout_, retries_, cache_);
}
}
diff --git a/src/lib/asiolink/recursive_query.h b/src/lib/asiolink/recursive_query.h
index 6ef0069..626ff42 100644
--- a/src/lib/asiolink/recursive_query.h
+++ b/src/lib/asiolink/recursive_query.h
@@ -98,12 +98,26 @@ public:
isc::dns::MessagePtr answer_message,
isc::dns::OutputBufferPtr buffer,
DNSServer* server);
+
+ /// \brief Set Test Server
+ ///
+ /// This method is *only* for unit testing the class. If set, it enables
+ /// recursive behaviour but, regardless of responses received, sends every
+ /// query to the test server.
+ ///
+ /// The test server is enabled by setting a non-zero port number.
+ ///
+ /// \param address IP address of the test server.
+ /// \param port Port number of the test server
+ void setTestServer(const std::string& address, uint16_t port);
+
private:
DNSService& dns_service_;
boost::shared_ptr<std::vector<std::pair<std::string, uint16_t> > >
upstream_;
boost::shared_ptr<std::vector<std::pair<std::string, uint16_t> > >
upstream_root_;
+ std::pair<std::string, uint16_t> test_server_;
int query_timeout_;
int client_timeout_;
int lookup_timeout_;
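A test-only usage sketch (the RecursiveQuery instance and the local responder
address/port are placeholders; per the documentation above, a non-zero port
enables the test server and a zero port leaves normal behaviour in place):

    #include <asiolink/recursive_query.h>

    // Point every upstream fetch at a local test responder.
    void useLocalTestServer(asiolink::RecursiveQuery& rq) {
        rq.setTestServer("127.0.0.1", 5300);   // all upstream fetches now go here
    }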
diff --git a/src/lib/asiolink/tcp_endpoint.h b/src/lib/asiolink/tcp_endpoint.h
index 8f6270f..158ca4a 100644
--- a/src/lib/asiolink/tcp_endpoint.h
+++ b/src/lib/asiolink/tcp_endpoint.h
@@ -24,32 +24,33 @@
namespace asiolink {
/// \brief The \c TCPEndpoint class is a concrete derived class of
-/// \c IOEndpoint that represents an endpoint of a TCP connection.
+/// \c IOEndpoint that represents an endpoint of a TCP packet.
///
-/// In the current implementation, an object of this class is always
-/// instantiated within the wrapper routines. Applications are expected to
-/// get access to the object via the abstract base class, \c IOEndpoint.
-/// This design may be changed when we generalize the wrapper interface.
-///
-/// Note: this implementation is optimized for the case where this object
-/// is created from an ASIO endpoint object in a receiving code path
-/// by avoiding to make a copy of the base endpoint. For TCP it may not be
-/// a big deal, but when we receive UDP packets at a high rate, the copy
-/// overhead might be significant.
+/// Other notes about \c TCPEndpoint apply to this class, too.
class TCPEndpoint : public IOEndpoint {
public:
///
- /// \name Constructors and Destructor
+ /// \name Constructors and Destructor.
///
//@{
+
+ /// \brief Default Constructor
+ ///
+ /// Creates an internal endpoint. This is expected to be set by some
+ /// external call.
+ TCPEndpoint() :
+ asio_endpoint_placeholder_(new asio::ip::tcp::endpoint()),
+ asio_endpoint_(*asio_endpoint_placeholder_)
+ {}
+
/// \brief Constructor from a pair of address and port.
///
/// \param address The IP address of the endpoint.
/// \param port The TCP port number of the endpoint.
TCPEndpoint(const IOAddress& address, const unsigned short port) :
asio_endpoint_placeholder_(
- new asio::ip::tcp::endpoint(
- asio::ip::address::from_string(address.toText()), port)),
+ new asio::ip::tcp::endpoint(asio::ip::address::from_string(address.toText()),
+ port)),
asio_endpoint_(*asio_endpoint_placeholder_)
{}
@@ -59,39 +60,53 @@ public:
/// corresponding ASIO class, \c tcp::endpoint.
///
/// \param asio_endpoint The ASIO representation of the TCP endpoint.
- TCPEndpoint(const asio::ip::tcp::endpoint& asio_endpoint) :
+ TCPEndpoint(asio::ip::tcp::endpoint& asio_endpoint) :
asio_endpoint_placeholder_(NULL), asio_endpoint_(asio_endpoint)
{}
+ /// \brief Constructor from an ASIO TCP endpoint.
+ ///
+ /// This constructor is designed to be an efficient wrapper for the
+ /// corresponding ASIO class, \c tcp::endpoint.
+ ///
+ /// \param asio_endpoint The ASIO representation of the TCP endpoint.
+ TCPEndpoint(const asio::ip::tcp::endpoint& asio_endpoint) :
+ asio_endpoint_placeholder_(new asio::ip::tcp::endpoint(asio_endpoint)),
+ asio_endpoint_(*asio_endpoint_placeholder_)
+ {}
+
/// \brief The destructor.
- ~TCPEndpoint() { delete asio_endpoint_placeholder_; }
+ virtual ~TCPEndpoint() { delete asio_endpoint_placeholder_; }
//@}
- IOAddress getAddress() const {
+ virtual IOAddress getAddress() const {
return (asio_endpoint_.address());
}
- uint16_t getPort() const {
+ virtual uint16_t getPort() const {
return (asio_endpoint_.port());
}
- short getProtocol() const {
+ virtual short getProtocol() const {
return (asio_endpoint_.protocol().protocol());
}
- short getFamily() const {
+ virtual short getFamily() const {
return (asio_endpoint_.protocol().family());
}
// This is not part of the exposed IOEndpoint API but allows
// direct access to the ASIO implementation of the endpoint
- const asio::ip::tcp::endpoint& getASIOEndpoint() const {
+ inline const asio::ip::tcp::endpoint& getASIOEndpoint() const {
+ return (asio_endpoint_);
+ }
+ inline asio::ip::tcp::endpoint& getASIOEndpoint() {
return (asio_endpoint_);
}
private:
- const asio::ip::tcp::endpoint* asio_endpoint_placeholder_;
- const asio::ip::tcp::endpoint& asio_endpoint_;
+ asio::ip::tcp::endpoint* asio_endpoint_placeholder_;
+ asio::ip::tcp::endpoint& asio_endpoint_;
};
} // namespace asiolink
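The constructors above follow a "placeholder" ownership pattern: the endpoint reference is always valid, but the placeholder pointer is non-NULL only when the object owns the underlying ASIO endpoint. A simplified, hypothetical illustration of the same pattern (types reduced to int for brevity):

    #include <cstddef>

    // Sketch of the placeholder pattern: own the value (placeholder set,
    // deleted in the destructor) or wrap a caller-owned one (placeholder
    // NULL, destructor deletes nothing).
    class Wrapper {
    public:
        Wrapper() :                         // own an internal value
            placeholder_(new int(0)), value_(*placeholder_)
        {}
        explicit Wrapper(int& external) :   // wrap a caller-owned value
            placeholder_(NULL), value_(external)
        {}
        ~Wrapper() { delete placeholder_; } // no-op when not owning
    private:
        Wrapper(const Wrapper&);            // copying would double-delete,
        Wrapper& operator=(const Wrapper&); // ... so it is disabled
        int* placeholder_;                  // non-NULL only when owning
        int& value_;                        // always a valid reference
    };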
diff --git a/src/lib/asiolink/tcp_server.cc b/src/lib/asiolink/tcp_server.cc
index e77828c..3e0cdb4 100644
--- a/src/lib/asiolink/tcp_server.cc
+++ b/src/lib/asiolink/tcp_server.cc
@@ -17,6 +17,7 @@
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h> // for some IPC/network system calls
+#include <errno.h>
#include <boost/shared_array.hpp>
@@ -65,7 +66,7 @@ TCPServer::TCPServer(io_service& io_service,
void
TCPServer::operator()(error_code ec, size_t length) {
- /// Because the coroutine reeentry block is implemented as
+ /// Because the coroutine reentry block is implemented as
/// a switch statement, inline variable declarations are not
/// permitted. Certain variables used below can be declared here.
@@ -83,11 +84,21 @@ TCPServer::operator()(error_code ec, size_t length) {
/// Create a socket to listen for connections
socket_.reset(new tcp::socket(acceptor_->get_io_service()));
- /// Wait for new connections. In the event of error,
+ /// Wait for new connections. In the event of non-fatal error,
/// try again
do {
CORO_YIELD acceptor_->async_accept(*socket_, *this);
- } while (!ec);
+ // Abort on fatal errors
+ // TODO: Log error?
+ if (ec) {
+ using namespace asio::error;
+ if (ec.value() != would_block && ec.value() != try_again &&
+ ec.value() != connection_aborted &&
+ ec.value() != interrupted) {
+ return;
+ }
+ }
+ } while (ec);
/// Fork the coroutine by creating a copy of this one and
/// scheduling it on the ASIO service queue. The parent
@@ -110,7 +121,7 @@ TCPServer::operator()(error_code ec, size_t length) {
/// Now read the message itself. (This is done in a different scope
/// to allow inline variable declarations.)
CORO_YIELD {
- InputBuffer dnsbuffer((const void *) data_.get(), length);
+ InputBuffer dnsbuffer(data_.get(), length);
uint16_t msglen = dnsbuffer.readUint16();
async_read(*socket_, asio::buffer(data_.get(), msglen), *this);
}
@@ -196,9 +207,10 @@ TCPServer::asyncLookup() {
}
void TCPServer::stop() {
- //server should not be stopped twice
- if (stopped_by_hand_)
+ // server should not be stopped twice
+ if (stopped_by_hand_) {
return;
+ }
stopped_by_hand_ = true;
acceptor_->close();
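The new accept loop above retries on transient errors and aborts only on fatal ones. A standalone sketch of that classification, using the same asio error constants named in the hunk (the helper itself is hypothetical):

    #include <asio.hpp>

    // Hypothetical helper: true only for errors that should abort the
    // server coroutine; transient conditions lead to another accept.
    bool isFatalAcceptError(const asio::error_code& ec) {
        using namespace asio::error;
        return (ec &&
                ec.value() != would_block && ec.value() != try_again &&
                ec.value() != connection_aborted && ec.value() != interrupted);
    }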
diff --git a/src/lib/asiolink/tcp_socket.h b/src/lib/asiolink/tcp_socket.h
index 5a85aaa..fcbf3b7 100644
--- a/src/lib/asiolink/tcp_socket.h
+++ b/src/lib/asiolink/tcp_socket.h
@@ -24,11 +24,18 @@
#include <sys/socket.h>
#include <unistd.h> // for some IPC/network system calls
-#include <iostream>
+#include <algorithm>
+#include <cassert>
#include <cstddef>
+#include <boost/bind.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
#include <config.h>
+#include <dns/buffer.h>
+
+#include <asiolink/asiolink_utilities.h>
#include <asiolink/io_asio_socket.h>
#include <asiolink/io_endpoint.h>
#include <asiolink/io_service.h>
@@ -36,6 +43,15 @@
namespace asiolink {
+/// \brief Buffer Too Large
+///
+/// Thrown on an attempt to send a buffer > 64k
+class BufferTooLarge : public IOError {
+public:
+ BufferTooLarge(const char* file, size_t line, const char* what) :
+ IOError(file, line, what) {}
+};
+
/// \brief The \c TCPSocket class is a concrete derived class of \c IOAsioSocket
/// that represents a TCP socket.
///
@@ -48,18 +64,18 @@ private:
TCPSocket& operator=(const TCPSocket&);
public:
-
+
/// \brief Constructor from an ASIO TCP socket.
///
- /// \param socket The ASIO representation of the TCP socket. It
- /// is assumed that the caller will open and close the socket, so
- /// these operations are a no-op for that socket.
+ /// \param socket The ASIO representation of the TCP socket. It is assumed
+ /// that the caller will open and close the socket, so these
+ /// operations are a no-op for that socket.
TCPSocket(asio::ip::tcp::socket& socket);
/// \brief Constructor
///
/// Used when the TCPSocket is being asked to manage its own internal
- /// socket. It is assumed that open() and close() will not be used.
+ /// socket. In this case, the open() and close() methods are used.
///
/// \param service I/O Service object used to manage the socket.
TCPSocket(IOService& service);
@@ -67,68 +83,79 @@ public:
/// \brief Destructor
virtual ~TCPSocket();
- virtual int getNative() const { return (socket_.native()); }
- virtual int getProtocol() const { return (IPPROTO_TCP); }
+ /// \brief Return file descriptor of underlying socket
+ virtual int getNative() const {
+ return (socket_.native());
+ }
- /// \brief Open Socket
+ /// \brief Return protocol of socket
+ virtual int getProtocol() const {
+ return (IPPROTO_TCP);
+ }
+
+ /// \brief Is "open()" synchronous?
///
- /// Opens the TCP socket. In the model for transport-layer agnostic I/O,
- /// an "open" operation includes a connection to the remote end (which
- /// may take time). This does not happen for TCP, so the method returns
- /// "false" to indicate that the operation completed synchronously.
+ /// Indicates that the opening of a TCP socket is asynchronous.
+ virtual bool isOpenSynchronous() const {
+ return (false);
+ }
+
+ /// \brief Open Socket
///
- /// \param endpoint Endpoint to which the socket will connect to.
- /// \param callback Unused.
+ /// Opens the TCP socket. This is an asynchronous operation, completion of
+ /// which will be signalled via a call to the callback function.
///
- /// \return false to indicate that the "operation" completed synchronously.
- virtual bool open(const IOEndpoint* endpoint, C&);
+ /// \param endpoint Endpoint to which the socket will connect.
+ /// \param callback Callback object.
+ virtual void open(const IOEndpoint* endpoint, C& callback);
/// \brief Send Asynchronously
///
- /// This corresponds to async_send_to() for TCP sockets and async_send()
- /// for TCP. In both cases an endpoint argument is supplied indicating the
- /// target of the send - this is ignored for TCP.
+ /// Calls the underlying socket's async_send() method to send a packet of
+ /// data asynchronously to the remote endpoint. The callback will be called
+ /// on completion.
///
/// \param data Data to send
/// \param length Length of data to send
- /// \param endpoint Target of the send
+ /// \param endpoint Target of the send. (Unused for a TCP socket because
+ /// that was determined when the connection was opened.)
/// \param callback Callback object.
virtual void asyncSend(const void* data, size_t length,
- const IOEndpoint* endpoint, C& callback);
+ const IOEndpoint* endpoint, C& callback);
/// \brief Receive Asynchronously
///
- /// This correstponds to async_receive_from() for TCP sockets and
- /// async_receive() for TCP. In both cases, an endpoint argument is
- /// supplied to receive the source of the communication. For TCP it will
- /// be filled in with details of the connection.
+ /// Calls the underlying socket's async_receive() method to read a packet
+ /// of data from a remote endpoint. Arrival of the data is signalled via a
+ /// call to the callback function.
///
/// \param data Buffer to receive incoming message
/// \param length Length of the data buffer
- /// \param cumulative Amount of data that should already be in the buffer.
- /// (This is ignored - every UPD receive fills the buffer from the start.)
+ /// \param offset Offset into buffer where data is to be put
/// \param endpoint Source of the communication
/// \param callback Callback object
- virtual void asyncReceive(void* data, size_t length, size_t cumulative,
- IOEndpoint* endpoint, C& callback);
+ virtual void asyncReceive(void* data, size_t length, size_t offset,
+ IOEndpoint* endpoint, C& callback);
- /// \brief Checks if the data received is complete.
+ /// \brief Process received data packet
///
- /// As all the data is received in one I/O, so this is, this is effectively
- /// a no-op (although it does update the amount of data received).
+ /// See the description of IOAsioSocket::receiveComplete for a complete
+ /// description of this method.
///
- /// \param data Data buffer containing data to date. (This is ignored
- /// for TCP receives.)
- /// \param length Amount of data received in last asynchronous I/O
- /// \param cumulative On input, amount of data received before the last
- /// I/O. On output, the total amount of data received to date.
+ /// \param staging Pointer to the start of the staging buffer.
+ /// \param length Amount of data in the staging buffer.
+ /// \param cumulative Amount of data received before the staging buffer is
+ /// processed.
+ /// \param offset Unused.
+ /// \param expected Unused.
+ /// \param outbuff Output buffer. Data in the staging buffer is copied
+ /// to this output buffer in the call.
///
- /// \return true if the receive is complete, false if another receive is
- /// needed.
- virtual bool receiveComplete(void*, size_t length, size_t& cumulative) {
- cumulative = length;
- return (true);
- }
+ /// \return Always true
+ virtual bool processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff);
/// \brief Cancel I/O On Socket
virtual void cancel();
@@ -144,13 +171,28 @@ private:
asio::ip::tcp::socket* socket_ptr_; ///< Pointer to own socket
asio::ip::tcp::socket& socket_; ///< Socket
bool isopen_; ///< true when socket is open
+
+ // TODO: Remove temporary buffer
+ // The current implementation copies the buffer passed to asyncSend() into
+ // a temporary buffer and precedes it with a two-byte count field. As
+ // ASIO should really be just about sending and receiving data, the TCP
+ // code should not do this. If the protocol using this requires a two-byte
+ // count, it should add it before calling this code. (This may be best
+ // achieved by altering isc::dns::buffer to have pairs of methods:
+ // getLength()/getTCPLength(), getData()/getTCPData(), with the getTCPXxx()
+ // methods taking into account a two-byte count field.)
+ //
+ // The option of sending the data in two operations, the count followed by
+ // the data was discounted as that would lead to two callbacks which would
+ // cause problems with the stackless coroutine code.
+ isc::dns::OutputBufferPtr send_buffer_; ///< Send buffer
};
// Constructor - caller manages socket
template <typename C>
TCPSocket<C>::TCPSocket(asio::ip::tcp::socket& socket) :
- socket_ptr_(NULL), socket_(socket), isopen_(true)
+ socket_ptr_(NULL), socket_(socket), isopen_(true), send_buffer_()
{
}
@@ -171,16 +213,14 @@ TCPSocket<C>::~TCPSocket()
delete socket_ptr_;
}
-// Open the socket. Throws an error on failure
-// TODO: Make the open more resilient
+// Open the socket.
-template <typename C> bool
-TCPSocket<C>::open(const IOEndpoint* endpoint, C&) {
+template <typename C> void
+TCPSocket<C>::open(const IOEndpoint* endpoint, C& callback) {
// Ignore opens on already-open socket. Don't throw a failure because
// of uncertainties as to what precedes what when using asynchronous I/O.
// It also allows us to treat a passed-in socket as a self-managed socket.
-
if (!isopen_) {
if (endpoint->getFamily() == AF_INET) {
socket_.open(asio::ip::tcp::v4());
@@ -190,35 +230,55 @@ TCPSocket<C>::open(const IOEndpoint* endpoint, C&) {
}
isopen_ = true;
- // TODO: Complete TCPSocket::open()
+ // Set options on the socket:
+ // Reuse address - allow the socket to bind to a port even if the port
+ // is in the TIMED_WAIT state.
+ socket_.set_option(asio::socket_base::reuse_address(true));
}
- return (false);
+
+ // Upconvert to a TCPEndpoint. We need to do this because although
+ // IOEndpoint is the base class of UDPEndpoint and TCPEndpoint, it does not
+ // contain a method for getting at the underlying endpoint type - that is in
+ // the derived class and the two classes differ on return type.
+ assert(endpoint->getProtocol() == IPPROTO_TCP);
+ const TCPEndpoint* tcp_endpoint =
+ static_cast<const TCPEndpoint*>(endpoint);
+
+ // Connect to the remote endpoint. On success, the handler will be
+ // called (with one argument - the length argument will default to
+ // zero).
+ socket_.async_connect(tcp_endpoint->getASIOEndpoint(), callback);
}
// Send a message. Should never do this if the socket is not open, so throw
// an exception if this is the case.
template <typename C> void
-TCPSocket<C>::asyncSend(const void* data, size_t length,
- const IOEndpoint* endpoint, C& callback)
+TCPSocket<C>::asyncSend(const void* data, size_t length, const IOEndpoint*,
+ C& callback)
{
if (isopen_) {
- // Upconvert to a TCPEndpoint. We need to do this because although
- // IOEndpoint is the base class of TCPEndpoint and TCPEndpoint, it
- // doing cont contain a method for getting at the underlying endpoint
- // type - those are in the derived class and the two classes differ on
- // return type.
-
- assert(endpoint->getProtocol() == IPPROTO_TCP);
- const TCPEndpoint* tcp_endpoint =
- static_cast<const TCPEndpoint*>(endpoint);
- std::cerr << "TCPSocket::asyncSend(): sending to " <<
- tcp_endpoint->getAddress().toText() <<
- ", port " << tcp_endpoint->getPort() << "\n";
-
- // TODO: Complete TCPSocket::asyncSend()
+ // Need to copy the data into a temporary buffer and precede it with
+ // a two-byte count field.
+ // TODO: arrange for the buffer passed to be preceded by the count
+ try {
+ // Ensure it fits into 16 bits
+ uint16_t count = boost::numeric_cast<uint16_t>(length);
+
+ // Copy data into a buffer preceded by the count field.
+ send_buffer_.reset(new isc::dns::OutputBuffer(length + 2));
+ send_buffer_->writeUint16(count);
+ send_buffer_->writeData(data, length);
+
+ // ... and send it
+ socket_.async_send(asio::buffer(send_buffer_->getData(),
+ send_buffer_->getLength()), callback);
+ } catch (boost::numeric::bad_numeric_cast& e) {
+ isc_throw(BufferTooLarge,
+ "attempt to send buffer larger than 64kB");
+ }
} else {
isc_throw(SocketNotOpen,
@@ -226,26 +286,40 @@ TCPSocket<C>::asyncSend(const void* data, size_t length,
}
}
-// Receive a message. Note that the "cumulative" argument is ignored - every TCP
-// receive is put into the buffer beginning at the start - there is no concept
-// receiving a subsequent part of a message. Same critera as before concerning
-// the need for the socket to be open.
-
+// Receive a message. Note that the "offset" argument is used as an index
+// into the buffer in order to decide where to put the data. It is up to the
+// caller to initialize the offset to zero for the first read.
template <typename C> void
-TCPSocket<C>::asyncReceive(void* data, size_t length, size_t,
+TCPSocket<C>::asyncReceive(void* data, size_t length, size_t offset,
IOEndpoint* endpoint, C& callback)
{
if (isopen_) {
-
- // Upconvert the endpoint again.
+ // Upconvert to a TCPEndpoint. We need to do this because although
+ // IOEndpoint is the base class of UDPEndpoint and TCPEndpoint, it
+ // does not contain a method for getting at the underlying endpoint
+ // type - that is in the derived class and the two classes differ on
+ // return type.
assert(endpoint->getProtocol() == IPPROTO_TCP);
- const TCPEndpoint* tcp_endpoint =
- static_cast<const TCPEndpoint*>(endpoint);
- std::cerr << "TCPSocket::asyncReceive(): receiving from " <<
- tcp_endpoint->getAddress().toText() <<
- ", port " << tcp_endpoint->getPort() << "\n";
+ TCPEndpoint* tcp_endpoint = static_cast<TCPEndpoint*>(endpoint);
+
+ // Write the endpoint details from the communications link. Ideally
+ // we should make IOEndpoint assignable, but this runs in to all sorts
+ // of problems concerning the management of the underlying Boost
+ // endpoint (e.g. if it is not self-managed, is the copied one
+ // self-managed?) The most pragmatic solution is to let Boost take care
+ // of everything and copy details of the underlying endpoint.
+ tcp_endpoint->getASIOEndpoint() = socket_.remote_endpoint();
+
+ // Ensure we can write into the buffer and if so, set the pointer to
+ // where the data will be written.
+ if (offset >= length) {
+ isc_throw(BufferOverflow, "attempt to read into area beyond end of "
+ "TCP receive buffer");
+ }
+ void* buffer_start = static_cast<void*>(static_cast<uint8_t*>(data) + offset);
- // TODO: Complete TCPSocket::asyncReceive()
+ // ... and kick off the read.
+ socket_.async_receive(asio::buffer(buffer_start, length - offset), callback);
} else {
isc_throw(SocketNotOpen,
@@ -253,7 +327,72 @@ TCPSocket<C>::asyncReceive(void* data, size_t length, size_t,
}
}
+// Is the receive complete?
+
+template <typename C> bool
+TCPSocket<C>::processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff)
+{
+ // Point to the data in the staging buffer and note how much there is.
+ const uint8_t* data = static_cast<const uint8_t*>(staging);
+ size_t data_length = length;
+
+ // Is the number in "expected" valid? It won't be unless we have received
+ // at least two bytes of data in total for this set of receives.
+ if (cumulative < 2) {
+
+ // "expected" is not valid. Did this read give us enough data to
+ // work it out?
+ cumulative += length;
+ if (cumulative < 2) {
+
+ // Nope, still not valid. This must have been the first packet and
+ // was only one byte long. Tell the fetch code to read the next
+ // packet into the staging buffer beyond the data that is already
+ // there so that the next time we are called we have a complete
+ // TCP count.
+ offset = cumulative;
+ return (false);
+ }
+
+ // Have enough data to interpret the packet count, so do so now.
+ expected = readUint16(data);
+
+ // We have two bytes less of data to process. Point to the start of the
+ // data and adjust the packet size. Note that at this point,
+ // "cumulative" is the true amount of data in the staging buffer, not
+ // "length".
+ data += 2;
+ data_length = cumulative - 2;
+ } else {
+
+ // Update total amount of data received.
+ cumulative += length;
+ }
+
+ // Regardless of anything else, the next read goes into the start of the
+ // staging buffer.
+ offset = 0;
+
+ // Work out how much data we still have to put in the output buffer. (This
+ // could be zero if we have just interpreted the TCP count and that was
+ // set to zero.)
+ if (expected >= outbuff->getLength()) {
+
+ // Still need data in the output packet. Copy what we can from the
+ // staging buffer to the output buffer.
+ size_t copy_amount = std::min(expected - outbuff->getLength(), data_length);
+ outbuff->writeData(data, copy_amount);
+ }
+
+ // We can now say if we have all the data.
+ return (expected == outbuff->getLength());
+}
+
// Cancel I/O on the socket. No-op if the socket is not open.
+
template <typename C> void
TCPSocket<C>::cancel() {
if (isopen_) {
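The temporary send buffer discussed above implements standard DNS-over-TCP framing: the payload is preceded by a two-byte, network-order length, and anything over 65535 bytes is rejected via boost::numeric_cast. A standalone sketch of that framing step (the function is illustrative, not the real asyncSend()):

    #include <stdint.h>
    #include <cstddef>
    #include <cstring>
    #include <stdexcept>
    #include <vector>
    #include <boost/numeric/conversion/cast.hpp>

    // Illustrative only: copy "length" bytes of payload behind a two-byte,
    // network-order count, throwing if the payload cannot fit in 16 bits.
    std::vector<uint8_t> frameTcpMessage(const void* data, size_t length) {
        uint16_t count;
        try {
            count = boost::numeric_cast<uint16_t>(length);
        } catch (const boost::numeric::bad_numeric_cast&) {
            throw std::length_error("attempt to send buffer larger than 64kB");
        }
        std::vector<uint8_t> framed(length + 2);
        framed[0] = static_cast<uint8_t>(count >> 8);    // high byte first
        framed[1] = static_cast<uint8_t>(count & 0xff);
        if (length > 0) {
            std::memcpy(&framed[2], data, length);
        }
        return (framed);
    }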
diff --git a/src/lib/asiolink/tests/Makefile.am b/src/lib/asiolink/tests/Makefile.am
index b4f0a87..3a229f3 100644
--- a/src/lib/asiolink/tests/Makefile.am
+++ b/src/lib/asiolink/tests/Makefile.am
@@ -18,6 +18,7 @@ TESTS += run_unittests
run_unittests_SOURCES = run_unittests.cc
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+run_unittests_SOURCES += asiolink_utilities_unittest.cc
run_unittests_SOURCES += io_address_unittest.cc
run_unittests_SOURCES += io_endpoint_unittest.cc
run_unittests_SOURCES += io_fetch_unittest.cc
@@ -25,19 +26,24 @@ run_unittests_SOURCES += io_socket_unittest.cc
run_unittests_SOURCES += io_service_unittest.cc
run_unittests_SOURCES += interval_timer_unittest.cc
run_unittests_SOURCES += recursive_query_unittest.cc
+run_unittests_SOURCES += recursive_query_unittest_2.cc
+run_unittests_SOURCES += tcp_endpoint_unittest.cc
+run_unittests_SOURCES += tcp_socket_unittest.cc
run_unittests_SOURCES += udp_endpoint_unittest.cc
run_unittests_SOURCES += udp_socket_unittest.cc
+run_unittests_SOURCES += qid_gen_unittest.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(SQLITE_LIBS)
-run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
-run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
diff --git a/src/lib/asiolink/tests/asiolink_utilities_unittest.cc b/src/lib/asiolink/tests/asiolink_utilities_unittest.cc
new file mode 100644
index 0000000..51f565f
--- /dev/null
+++ b/src/lib/asiolink/tests/asiolink_utilities_unittest.cc
@@ -0,0 +1,74 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// \brief Test of asiolink utilities
+///
+/// Tests the functionality of the asiolink utilities code by comparing them
+/// with the equivalent methods in isc::dns::[Input/Output]Buffer.
+
+#include <cstddef>
+
+#include <gtest/gtest.h>
+
+#include <dns/buffer.h>
+#include <asiolink/asiolink_utilities.h>
+
+using namespace asiolink;
+using namespace isc::dns;
+
+TEST(asioutil, readUint16) {
+
+ // Reference buffer
+ uint8_t data[2];
+ isc::dns::InputBuffer buffer(data, sizeof(data));
+
+ // Avoid possible compiler warnings by only setting uint8_t variables to
+ // uint8_t values.
+ uint8_t i8 = 0;
+ uint8_t j8 = 0;
+ for (int i = 0; i < (2 << 8); ++i, ++i8) {
+ for (int j = 0; j < (2 << 8); ++j, ++j8) {
+ data[0] = i8;
+ data[1] = j8;
+ buffer.setPosition(0);
+ EXPECT_EQ(buffer.readUint16(), readUint16(data));
+ }
+ }
+}
+
+
+TEST(asioutil, writeUint16) {
+
+ // Reference buffer
+ isc::dns::OutputBuffer buffer(2);
+ uint8_t test[2];
+
+ // Avoid possible compiler warnings by only setting uint16_t variables to
+ // uint16_t values.
+ uint16_t i16 = 0;
+ for (uint32_t i = 0; i < (2 << 16); ++i, ++i16) {
+
+ // Write the reference data
+ buffer.clear();
+ buffer.writeUint16(i16);
+
+ // ... and the test data
+ writeUint16(i16, test);
+
+ // ... and compare
+ const uint8_t* ref = static_cast<const uint8_t*>(buffer.getData());
+ EXPECT_EQ(ref[0], test[0]);
+ EXPECT_EQ(ref[1], test[1]);
+ }
+}
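The utilities under test are not shown in this excerpt (they live in asiolink/asiolink_utilities.h); based on how the tests call them, the pair presumably amounts to network-order 16-bit read/write helpers along these lines (a sketch, not the actual header):

    #include <stdint.h>

    // Presumed shape of the helpers exercised above: read and write a
    // 16-bit value in network byte order at the start of a raw buffer.
    inline uint16_t readUint16(const void* buffer) {
        const uint8_t* byte = static_cast<const uint8_t*>(buffer);
        return ((static_cast<uint16_t>(byte[0]) << 8) |
                 static_cast<uint16_t>(byte[1]));
    }

    inline void writeUint16(uint16_t value, void* buffer) {
        uint8_t* byte = static_cast<uint8_t*>(buffer);
        byte[0] = static_cast<uint8_t>((value >> 8) & 0xff);
        byte[1] = static_cast<uint8_t>(value & 0xff);
    }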
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index 534850a..6101473 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -22,9 +22,9 @@ using namespace asiolink;
TEST(IOEndpointTest, createUDPv4) {
const IOEndpoint* ep;
- ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 5300);
+ ep = IOEndpoint::create(IPPROTO_UDP, IOAddress("192.0.2.1"), 53210);
EXPECT_EQ("192.0.2.1", ep->getAddress().toText());
- EXPECT_EQ(5300, ep->getPort());
+ EXPECT_EQ(53210, ep->getPort());
EXPECT_EQ(AF_INET, ep->getFamily());
EXPECT_EQ(AF_INET, ep->getAddress().getFamily());
EXPECT_EQ(IPPROTO_UDP, ep->getProtocol());
@@ -62,7 +62,7 @@ TEST(IOEndpointTest, createTCPv6) {
TEST(IOEndpointTest, createIPProto) {
EXPECT_THROW(IOEndpoint::create(IPPROTO_IP, IOAddress("192.0.2.1"),
- 5300)->getAddress().toText(),
+ 53210)->getAddress().toText(),
IOError);
}
diff --git a/src/lib/asiolink/tests/io_fetch_unittest.cc b/src/lib/asiolink/tests/io_fetch_unittest.cc
index 9b74ee0..901df45 100644
--- a/src/lib/asiolink/tests/io_fetch_unittest.cc
+++ b/src/lib/asiolink/tests/io_fetch_unittest.cc
@@ -12,13 +12,17 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <gtest/gtest.h>
-#include <boost/bind.hpp>
+#include <algorithm>
#include <cstdlib>
#include <string>
+#include <iostream>
+#include <iomanip>
+#include <iterator>
#include <vector>
-#include <string.h>
+#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <asio.hpp>
@@ -30,19 +34,27 @@
#include <dns/name.h>
#include <dns/rcode.h>
+#include <asiolink/asiolink_utilities.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_endpoint.h>
#include <asiolink/io_fetch.h>
#include <asiolink/io_service.h>
using namespace asio;
using namespace isc::dns;
-using asio::ip::udp;
+using namespace asio::ip;
+using namespace std;
namespace asiolink {
const asio::ip::address TEST_HOST(asio::ip::address::from_string("127.0.0.1"));
const uint16_t TEST_PORT(5301);
-// FIXME Shouldn't we send something that is real message?
-const char TEST_DATA[] = "TEST DATA";
+const int SEND_INTERVAL = 250; // Interval in ms between TCP sends
+const size_t MAX_SIZE = 64 * 1024; // Should be able to take 64kB
+
+// The tests are complex, so debug output has been left in (although disabled).
+// Set this to true to enable it.
+const bool DEBUG = false;
/// \brief Test fixture for the asiolink::IOFetch.
class IOFetchTest : public virtual ::testing::Test, public virtual IOFetch::Callback
@@ -52,13 +64,26 @@ public:
IOFetch::Result expected_; ///< Expected result of the callback
bool run_; ///< Did the callback run already?
Question question_; ///< What to ask
- OutputBufferPtr buff_; ///< Buffer to hold result
+ OutputBufferPtr result_buff_; ///< Buffer to hold result of fetch
+ OutputBufferPtr msgbuf_; ///< Buffer corresponding to known question
IOFetch udp_fetch_; ///< For UDP query test
- //IOFetch tcp_fetch_; ///< For TCP query test
-
- // The next member is the buffer iin which the "server" (implemented by the
- // response handler method) receives the question sent by the fetch object.
- std::vector<char> server_buff_; ///< Server buffer
+ IOFetch tcp_fetch_; ///< For TCP query test
+ IOFetch::Protocol protocol_; ///< Protocol being tested
+ size_t cumulative_; ///< Cumulative data received by "server".
+ deadline_timer timer_; ///< Timer to measure timeouts
+
+ // The next member is the buffer in which the "server" (implemented by the
+ // response handler methods in this class) receives the question sent by the
+ // fetch object.
+ uint8_t receive_buffer_[MAX_SIZE]; ///< Server receive buffer
+ vector<uint8_t> send_buffer_; ///< Server send buffer
+ uint16_t send_cumulative_; ///< Data sent so far
+
+ // Other data.
+ string return_data_; ///< Data returned by server
+ string test_data_; ///< Large string - here for convenience
+ bool debug_; ///< true to enable debug output
+ size_t tcp_send_size_; ///< Max size of TCP send
/// \brief Constructor
IOFetchTest() :
@@ -66,126 +91,518 @@ public:
expected_(IOFetch::NOTSET),
run_(false),
question_(Name("example.net"), RRClass::IN(), RRType::A()),
- buff_(new OutputBuffer(512)),
- udp_fetch_(IPPROTO_UDP, service_, question_, IOAddress(TEST_HOST),
- TEST_PORT, buff_, this, 100),
- server_buff_(512)
- // tcp_fetch_(service_, question_, IOAddress(TEST_HOST), TEST_PORT,
- // buff_, this, 100, IPPROTO_UDP)
- { }
+ result_buff_(new OutputBuffer(512)),
+ msgbuf_(new OutputBuffer(512)),
+ udp_fetch_(IOFetch::UDP, service_, question_, IOAddress(TEST_HOST),
+ TEST_PORT, result_buff_, this, 100),
+ tcp_fetch_(IOFetch::TCP, service_, question_, IOAddress(TEST_HOST),
+ TEST_PORT, result_buff_, this, (16 * SEND_INTERVAL)),
+ // Timeout interval chosen to ensure no timeout
+ protocol_(IOFetch::TCP), // for initialization - will be changed
+ cumulative_(0),
+ timer_(service_.get_io_service()),
+ receive_buffer_(),
+ send_buffer_(),
+ send_cumulative_(0),
+ return_data_(""),
+ test_data_(""),
+ debug_(DEBUG),
+ tcp_send_size_(0)
+ {
+ // Construct the data buffer for question we expect to receive.
+ Message msg(Message::RENDER);
+ msg.setQid(0);
+ msg.setOpcode(Opcode::QUERY());
+ msg.setRcode(Rcode::NOERROR());
+ msg.setHeaderFlag(Message::HEADERFLAG_RD);
+ msg.addQuestion(question_);
+ MessageRenderer renderer(*msgbuf_);
+ msg.toWire(renderer);
+
+ // Initialize the test data to be returned: tests will return a
+ // substring of this data. (It's convenient to have this as a member of
+ // the class.)
+ //
+ // We could initialize the data with a single character, but as an added
+ // check we'll make sure that it has some structure.
+
+ test_data_.clear();
+ test_data_.reserve(MAX_SIZE);
+ while (test_data_.size() < MAX_SIZE) {
+ test_data_ += "A message to be returned to the client that has "
+ "some sort of structure.";
+ }
+ }
+
+ /// \brief UDP Response handler (the "remote UDP DNS server")
+ ///
+ /// When IOFetch is sending data, this response handler emulates the remote
+ /// DNS server. It checks that the data sent by the IOFetch object is what
+ /// was expected to have been sent, then sends back a known buffer of data.
+ ///
+ /// \param remote Endpoint to which to send the answer
+ /// \param socket Socket to use to send the answer
+ /// \param ec ASIO error code, completion code of asynchronous I/O issued
+ /// by the "server" to receive data.
+ /// \param length Amount of data received.
+ void udpReceiveHandler(udp::endpoint* remote, udp::socket* socket,
+ error_code ec = error_code(), size_t length = 0) {
+ if (debug_) {
+ cout << "udpReceiveHandler(): error = " << ec.value() <<
+ ", length = " << length << endl;
+ }
+
+ // The QID in the incoming data is random so set it to 0 for the
+ // data comparison check. (It is set to 0 in the buffer containing
+ // the expected data.)
+ receive_buffer_[0] = receive_buffer_[1] = 0;
+
+ // Check that length of the received data and the expected data are
+ // identical, then check that the data is identical as well.
+ EXPECT_EQ(msgbuf_->getLength(), length);
+ EXPECT_TRUE(equal(receive_buffer_, (receive_buffer_ + length - 1),
+ static_cast<const uint8_t*>(msgbuf_->getData())));
+
+ // Return a message back to the IOFetch object.
+ socket->send_to(asio::buffer(return_data_.c_str(), return_data_.size()),
+ *remote);
+ if (debug_) {
+ cout << "udpReceiveHandler(): returned " << return_data_.size() <<
+ " bytes to the client" << endl;
+ }
+ }
+
+ /// \brief Completion Handler for accepting TCP data
+ ///
+ /// Called when the remote system connects to the "server". It issues
+ /// an asynchronous read on the socket to read data.
+ ///
+ /// \param socket Socket on which data will be received
+ /// \param ec Boost error code, value should be zero.
+ void tcpAcceptHandler(tcp::socket* socket, error_code ec = error_code())
+ {
+ if (debug_) {
+ cout << "tcpAcceptHandler(): error = " << ec.value() << endl;
+ }
+
+ // Expect that the accept completed without a problem.
+ EXPECT_EQ(0, ec.value());
+
+ // Work out the maximum size of data we can send over it when we
+ // respond, then subtract 1kB or so for safety.
+ tcp::socket::send_buffer_size send_size;
+ socket->get_option(send_size);
+ if (send_size.value() < (2 * 1024)) {
+ FAIL() << "TCP send size is less than 2kB";
+ } else {
+ tcp_send_size_ = send_size.value() - 1024;
+ if (debug_) {
+ cout << "tcpAcceptHandler(): will use send size = " << tcp_send_size_ << endl;
+ }
+ }
+
+ // Initiate a read on the socket.
+ cumulative_ = 0;
+ socket->async_receive(asio::buffer(receive_buffer_, sizeof(receive_buffer_)),
+ boost::bind(&IOFetchTest::tcpReceiveHandler, this, socket, _1, _2));
+ }
+
+ /// \brief Completion handler for receiving TCP data
+ ///
+ /// When IOFetch is sending data, this response handler emulates the remote
+ /// DNS server. It checks that all the data sent by the IOFetch object has been
+ /// received, issuing another read if not. If the data is complete, it is
+ /// compared to what is expected and a reply sent back to the IOFetch.
+ ///
+ /// \param socket Socket to use to send the answer
+ /// \param ec ASIO error code, completion code of asynchronous I/O issued
+ /// by the "server" to receive data.
+ /// \param length Amount of data received.
+ void tcpReceiveHandler(tcp::socket* socket, error_code ec = error_code(),
+ size_t length = 0)
+ {
+ if (debug_) {
+ cout << "tcpReceiveHandler(): error = " << ec.value() <<
+ ", length = " << length << endl;
+ }
+ // Expect that the receive completed without a problem.
+ EXPECT_EQ(0, ec.value());
+
+ // If we haven't received all the data, issue another read.
+ cumulative_ += length;
+ bool complete = false;
+ if (cumulative_ > 2) {
+ uint16_t dns_length = readUint16(receive_buffer_);
+ complete = ((dns_length + 2) == cumulative_);
+ }
+
+ if (!complete) {
+ socket->async_receive(asio::buffer((receive_buffer_ + cumulative_),
+ (sizeof(receive_buffer_) - cumulative_)),
+ boost::bind(&IOFetchTest::tcpReceiveHandler, this, socket, _1, _2));
+ return;
+ }
+
+ // Check that length of the DNS message received is that expected, then
+ // compare buffers, zeroing the QID in the received buffer to match
+ // that set in our expected question. Note that due to the length
+ // field the QID in the received buffer is in the third and fourth
+ // bytes.
+ EXPECT_EQ(msgbuf_->getLength() + 2, cumulative_);
+ receive_buffer_[2] = receive_buffer_[3] = 0;
+ EXPECT_TRUE(equal((receive_buffer_ + 2), (receive_buffer_ + cumulative_ - 2),
+ static_cast<const uint8_t*>(msgbuf_->getData())));
+
+ // ... and return a message back. This has to be preceded by a two-byte
+ // count field.
+ send_buffer_.clear();
+ send_buffer_.push_back(0);
+ send_buffer_.push_back(0);
+ writeUint16(return_data_.size(), &send_buffer_[0]);
+ copy(return_data_.begin(), return_data_.end(), back_inserter(send_buffer_));
+
+ // Send the data. This is done in multiple writes with a delay between
+ // each to check that the reassembly of TCP packets from fragments works.
+ send_cumulative_ = 0;
+ tcpSendData(socket);
+ }
+
+ /// \brief Send Data Over TCP
+ ///
+ /// Send the TCP data back to the IOFetch object. The data is sent in
+ /// three chunks - two of 16 bytes and the remainder, with a 250ms gap
+ /// between each. (Amounts of data smaller than 32 bytes are sent in
+ /// one or two packets.)
+ ///
+ /// \param socket Socket over which send should take place
+ void tcpSendData(tcp::socket* socket) {
+ if (debug_) {
+ cout << "tcpSendData()" << endl;
+ }
+
+ // Decide what to send based on the cumulative count. At most we'll do
+ // two chunks of 16 bytes (with a 250ms gap between) and then the
+ // remainder.
+ uint8_t* send_ptr = &send_buffer_[send_cumulative_];
+ // Pointer to data to send
+ size_t amount = 16; // Amount of data to send
+ if (send_cumulative_ < (2 * amount)) {
+
+ // First or second time through, send at most 16 bytes
+ amount = min(amount, (send_buffer_.size() - send_cumulative_));
+
+ } else {
+
+ // For all subsequent times, send the remainder, maximised to
+ // whatever we have chosen for the maximum send size.
+ amount = min(tcp_send_size_,
+ (send_buffer_.size() - send_cumulative_));
+ }
+ if (debug_) {
+ cout << "tcpSendData(): sending " << amount << " bytes" << endl;
+ }
+
+
+ // ... and send it. The amount sent is also passed as the first
+ // argument of the send callback, as a check.
+ socket->async_send(asio::buffer(send_ptr, amount),
+ boost::bind(&IOFetchTest::tcpSendHandler, this,
+ amount, socket, _1, _2));
+ }
+
+ /// \brief Completion Handler for Sending TCP data
+ ///
+ /// Called when the asynchronous send of data back to the IOFetch object
+ /// by the TCP "server" in this class has completed. (This send has to
+ /// be asynchronous because control needs to return to the caller in order
+ /// for the IOService "run()" method to be called to run the handlers.)
+ ///
+ /// If not all the data has been sent, a short delay is instigated (during
+ /// which control returns to the IOService). This should force the queued
+ /// data to actually be sent and the IOFetch receive handler to be triggered.
+ /// In this way, the ability of IOFetch to handle fragmented TCP packets
+ /// should be checked.
+ ///
+ /// \param expected Number of bytes that were expected to have been sent.
+ /// \param socket Socket over which the send took place. Only used to
+ /// pass back to the send method.
+ /// \param ec Boost error code, value should be zero.
+ /// \param length Number of bytes sent.
+ void tcpSendHandler(size_t expected, tcp::socket* socket,
+ error_code ec = error_code(), size_t length = 0)
+ {
+ if (debug_) {
+ cout << "tcpSendHandler(): error = " << ec.value() <<
+ ", length = " << length << endl;
+ }
+
+ EXPECT_EQ(0, ec.value()); // Expect no error
+ EXPECT_EQ(expected, length); // And that amount sent is as expected
+
+ // Do we need to send more?
+ send_cumulative_ += length;
+ if (send_cumulative_ < send_buffer_.size()) {
+
+ // Yes - set up a timer: the callback handler for the timer is
+ // tcpSendData, which will then send the next chunk. We pass the
+ // socket over which data should be sent as an argument to that
+ // function.
+ timer_.expires_from_now(boost::posix_time::milliseconds(SEND_INTERVAL));
+ timer_.async_wait(boost::bind(&IOFetchTest::tcpSendData, this,
+ socket));
+ }
+ }
/// \brief Fetch completion callback
///
/// This is the callback's operator() method which is called when the fetch
- /// is complete. Check that the data received is the wire format of the
- /// question, then send back an arbitrary response.
+ /// is complete. It checks that the data received is the wire format of the
+ /// data sent back by the server.
+ ///
+ /// \param result Result indicated by the callback
void operator()(IOFetch::Result result) {
+ if (debug_) {
+ cout << "operator()(): result = " << result << endl;
+ }
+
EXPECT_EQ(expected_, result); // Check correct result returned
EXPECT_FALSE(run_); // Check it is run only once
run_ = true; // Note success
- service_.stop(); // ... and exit run loop
- }
- /// \brief Response handler, pretending to be remote DNS server
- ///
- /// This checks that the data sent is what we expected to receive, and
- /// sends back a test answer.
- void respond(udp::endpoint* remote, udp::socket* socket,
- asio::error_code ec = asio::error_code(), size_t length = 0) {
+ // If the expected result is SUCCESS, then this should have been called
+ // when one of the "servers" in this class has sent back return_data_.
+ // Check the data is as expected.
+ if (expected_ == IOFetch::SUCCESS) {
+ EXPECT_EQ(return_data_.size(), result_buff_->getLength());
- // Construct the data buffer for question we expect to receive.
- OutputBuffer msgbuf(512);
- Message msg(Message::RENDER);
- msg.setQid(0);
- msg.setOpcode(Opcode::QUERY());
- msg.setRcode(Rcode::NOERROR());
- msg.setHeaderFlag(Message::HEADERFLAG_RD);
- msg.addQuestion(question_);
- MessageRenderer renderer(msgbuf);
- msg.toWire(renderer);
+ const uint8_t* start = static_cast<const uint8_t*>(result_buff_->getData());
+ EXPECT_TRUE(equal(return_data_.begin(), return_data_.end(), start));
+ }
- // The QID in the incoming data is random so set it to 0 for the
- // data comparison check. (It was set to 0 when the buffer containing
- // the expected data was constructed above.)
- server_buff_[0] = 0;
- server_buff_[1] = 0;
+ // ... and cause the run loop to exit.
+ service_.stop();
+ }
- // Check that lengths are identical.
- EXPECT_EQ(msgbuf.getLength(), length);
- EXPECT_TRUE(memcmp(msgbuf.getData(), &server_buff_[0], length) == 0);
+ // The next set of methods are the tests themselves. A number of the TCP
+ // and UDP tests are very similar.
- // ... and return a message back.
- socket->send_to(asio::buffer(TEST_DATA, sizeof TEST_DATA), *remote);
+ /// \brief Check for stop()
+ ///
+ /// Test that when we run the query and stop it after it was run, it returns
+ /// "stopped" correctly. (That is why stop() is posted to the service_ as
+ /// well instead of calling it.)
+ ///
+ /// \param protocol Test protocol
+ /// \param fetch Fetch object being tested
+ void stopTest(IOFetch::Protocol protocol, IOFetch& fetch) {
+ protocol_ = protocol;
+ expected_ = IOFetch::STOPPED;
+
+ // Post the query
+ service_.get_io_service().post(fetch);
+
+ // Post query_.stop() (yes, the boost::bind thing is just
+ // query_.stop()).
+ service_.get_io_service().post(
+ boost::bind(&IOFetch::stop, fetch, IOFetch::STOPPED));
+
+ // Run both of them. run() returns when everything in the I/O service
+ // queue has completed.
+ service_.run();
+ EXPECT_TRUE(run_);
}
-};
+ /// \brief Premature stop test
+ ///
+ /// Test that when we queue the query to service_ and call stop() before it
+ /// gets executed, it acts sanely as well (eg. has the same result as
+ /// running stop() after - calls the callback).
+ ///
+ /// \param protocol Test protocol
+ /// \param fetch Fetch object being tested
+ void prematureStopTest(IOFetch::Protocol protocol, IOFetch& fetch) {
+ protocol_ = protocol;
+ expected_ = IOFetch::STOPPED;
+
+ // Stop before it is started
+ fetch.stop();
+ service_.get_io_service().post(fetch);
+
+ service_.run();
+ EXPECT_TRUE(run_);
+ }
-/// Test that when we run the query and stop it after it was run,
-/// it returns "stopped" correctly.
-///
-/// That is why stop() is posted to the service_ as well instead
-/// of calling it.
-TEST_F(IOFetchTest, UdpStop) {
- expected_ = IOFetch::STOPPED;
+ /// \brief Timeout test
+ ///
+ /// Test that fetch times out when no answer arrives.
+ ///
+ /// \param protocol Test protocol
+ /// \param fetch Fetch object being tested
+ void timeoutTest(IOFetch::Protocol protocol, IOFetch& fetch) {
+ protocol_ = protocol;
+ expected_ = IOFetch::TIME_OUT;
+
+ service_.get_io_service().post(fetch);
+ service_.run();
+ EXPECT_TRUE(run_);
+ }
- // Post the query
- service_.get_io_service().post(udp_fetch_);
+ /// \brief Send/Receive Test
+ ///
+ /// Sends a query to the server and then receives a response.
+ ///
+ /// \param return_data Test data to return to client
+ void tcpSendReturnTest(const std::string& return_data) {
+ if (debug_) {
+ cout << "tcpSendReturnTest(): data size = " << return_data.size() << endl;
+ }
+ return_data_ = return_data;
+ protocol_ = IOFetch::TCP;
+ expected_ = IOFetch::SUCCESS;
+
+ // Socket into which the connection will be accepted.
+ tcp::socket socket(service_.get_io_service());
+
+ // Acceptor object - called when the connection is made, the handler
+ // will initiate a read on the socket.
+ tcp::acceptor acceptor(service_.get_io_service(),
+ tcp::endpoint(tcp::v4(), TEST_PORT));
+ acceptor.async_accept(socket,
+ boost::bind(&IOFetchTest::tcpAcceptHandler, this, &socket, _1));
+
+ // Post the TCP fetch object to send the query and receive the response.
+ service_.get_io_service().post(tcp_fetch_);
+
+ // ... and execute all the callbacks. This exits when the fetch
+ // completes.
+ service_.run();
+ EXPECT_TRUE(run_); // Make sure the callback did execute
+
+ // Tidy up
+ socket.close();
+ }
+};
- // Post query_.stop() (yes, the boost::bind thing is just
- // query_.stop()).
- service_.get_io_service().post(
- boost::bind(&IOFetch::stop, udp_fetch_, IOFetch::STOPPED));
+// Check the protocol
+TEST_F(IOFetchTest, Protocol) {
+ EXPECT_EQ(IOFetch::UDP, udp_fetch_.getProtocol());
+ EXPECT_EQ(IOFetch::TCP, tcp_fetch_.getProtocol());
+}
- // Run both of them. run() returns when everything in the I/O service
- // queue has completed.
- service_.run();
- EXPECT_TRUE(run_);
+// UDP Stop test - see IOFetchTest::stopTest() header.
+TEST_F(IOFetchTest, UdpStop) {
+ stopTest(IOFetch::UDP, udp_fetch_);
}
-// Test that when we queue the query to service_ and call stop() before it gets
-// executed, it acts sanely as well (eg. has the same result as running stop()
-// after - calls the callback).
+// UDP premature stop test - see IOFetchTest::prematureStopTest() header.
TEST_F(IOFetchTest, UdpPrematureStop) {
- expected_ = IOFetch::STOPPED;
-
- // Stop before it is started
- udp_fetch_.stop();
- service_.get_io_service().post(udp_fetch_);
-
- service_.run();
- EXPECT_TRUE(run_);
+ prematureStopTest(IOFetch::UDP, udp_fetch_);
}
-// Test that it will timeout when no answer arrives.
+// UDP timeout test - see IOFetchTest::timeoutTest() header.
TEST_F(IOFetchTest, UdpTimeout) {
- expected_ = IOFetch::TIME_OUT;
-
- service_.get_io_service().post(udp_fetch_);
- service_.run();
- EXPECT_TRUE(run_);
+ timeoutTest(IOFetch::UDP, udp_fetch_);
}
-// Test that it will succeed when we fake an answer and stores the same data we
-// send. This is done through a real socket on the loopback address.
-TEST_F(IOFetchTest, UdpReceive) {
+// UDP SendReceive test. Sets up a UDP server then posts a UDP fetch object.
+// This will send question_ to the server and receive the answer back from it.
+TEST_F(IOFetchTest, UdpSendReceive) {
+ protocol_ = IOFetch::UDP;
expected_ = IOFetch::SUCCESS;
+ // Set up the server.
udp::socket socket(service_.get_io_service(), udp::v4());
socket.set_option(socket_base::reuse_address(true));
socket.bind(udp::endpoint(TEST_HOST, TEST_PORT));
+ return_data_ = "Message returned to the client";
udp::endpoint remote;
- socket.async_receive_from(asio::buffer(server_buff_),
+ socket.async_receive_from(asio::buffer(receive_buffer_, sizeof(receive_buffer_)),
remote,
- boost::bind(&IOFetchTest::respond, this, &remote, &socket, _1, _2));
+ boost::bind(&IOFetchTest::udpReceiveHandler, this, &remote, &socket,
+ _1, _2));
service_.get_io_service().post(udp_fetch_);
+ if (debug_) {
+ cout << "udpSendReceive: async_receive_from posted, waiting for callback" <<
+ endl;
+ }
service_.run();
socket.close();
- EXPECT_TRUE(run_);
- ASSERT_EQ(sizeof TEST_DATA, buff_->getLength());
- EXPECT_EQ(0, memcmp(TEST_DATA, buff_->getData(), sizeof TEST_DATA));
+ EXPECT_TRUE(run_);
+}
+
+// Do the same tests for TCP transport
+
+TEST_F(IOFetchTest, TcpStop) {
+ stopTest(IOFetch::TCP, tcp_fetch_);
+}
+
+TEST_F(IOFetchTest, TcpPrematureStop) {
+ prematureStopTest(IOFetch::TCP, tcp_fetch_);
+}
+
+TEST_F(IOFetchTest, TcpTimeout) {
+ timeoutTest(IOFetch::TCP, tcp_fetch_);
+}
+
+// Test with values at or near 0, then at or near the chunk size (16 and 32
+// bytes, the sizes of the first two packets) then up to 65535. These are done
+// in separate tests because in practice a new IOFetch is created for each
+// query/response exchange and we don't want to confuse matters in the test
+// by running the test with an IOFetch that has already done one exchange.
+
+TEST_F(IOFetchTest, TcpSendReceive0) {
+ tcpSendReturnTest(test_data_.substr(0, 0));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive1) {
+ tcpSendReturnTest(test_data_.substr(0, 1));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive15) {
+ tcpSendReturnTest(test_data_.substr(0, 15));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive16) {
+ tcpSendReturnTest(test_data_.substr(0, 16));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive17) {
+ tcpSendReturnTest(test_data_.substr(0, 17));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive31) {
+ tcpSendReturnTest(test_data_.substr(0, 31));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive32) {
+ tcpSendReturnTest(test_data_.substr(0, 32));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive33) {
+ tcpSendReturnTest(test_data_.substr(0, 33));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive4096) {
+ tcpSendReturnTest(test_data_.substr(0, 4096));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive8192) {
+ tcpSendReturnTest(test_data_.substr(0, 8192));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive16384) {
+ tcpSendReturnTest(test_data_.substr(0, 16384));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive32768) {
+ tcpSendReturnTest(test_data_.substr(0, 32768));
+}
+
+TEST_F(IOFetchTest, TcpSendReceive65535) {
+ tcpSendReturnTest(test_data_.substr(0, 65535));
}
} // namespace asiolink
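The TCP receive handler above decides whether the whole query has arrived by comparing the cumulative byte count against the two-byte length prefix. That test condenses to a check along these lines (a standalone sketch; the helper is hypothetical):

    #include <stdint.h>
    #include <cstddef>

    // Hypothetical helper: a DNS/TCP message is complete once the two-byte
    // length field has arrived and "cumulative" covers the length it announces.
    bool tcpMessageComplete(const uint8_t* buffer, size_t cumulative) {
        if (cumulative < 2) {
            return (false);                 // length field not yet received
        }
        const uint16_t dns_length =
            (static_cast<uint16_t>(buffer[0]) << 8) |
             static_cast<uint16_t>(buffer[1]);
        return (cumulative == static_cast<size_t>(dns_length) + 2);
    }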
diff --git a/src/lib/asiolink/tests/io_service_unittest.cc b/src/lib/asiolink/tests/io_service_unittest.cc
index 28924d4..779d03e 100644
--- a/src/lib/asiolink/tests/io_service_unittest.cc
+++ b/src/lib/asiolink/tests/io_service_unittest.cc
@@ -28,7 +28,7 @@ const char* const TEST_IPV4_ADDR = "127.0.0.1";
TEST(IOServiceTest, badPort) {
IOService io_service;
EXPECT_THROW(DNSService(io_service, *"65536", true, false, NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *"5300.0", true, false, NULL, NULL, NULL), IOError);
+ EXPECT_THROW(DNSService(io_service, *"53210.0", true, false, NULL, NULL, NULL), IOError);
EXPECT_THROW(DNSService(io_service, *"-1", true, false, NULL, NULL, NULL), IOError);
EXPECT_THROW(DNSService(io_service, *"domain", true, false, NULL, NULL, NULL), IOError);
}
diff --git a/src/lib/asiolink/tests/qid_gen_unittest.cc b/src/lib/asiolink/tests/qid_gen_unittest.cc
new file mode 100644
index 0000000..3ad8a03
--- /dev/null
+++ b/src/lib/asiolink/tests/qid_gen_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// \brief Test of QidGenerator
+///
+
+#include <gtest/gtest.h>
+
+#include <asiolink/qid_gen.h>
+#include <dns/message.h>
+
+// Tests the operation of the Qid generator
+
+// Check that getInstance returns a singleton
+TEST(QidGenerator, singleton) {
+ asiolink::QidGenerator& g1 = asiolink::QidGenerator::getInstance();
+ asiolink::QidGenerator& g2 = asiolink::QidGenerator::getInstance();
+
+ EXPECT_TRUE(&g1 == &g2);
+}
+
+TEST(QidGenerator, generate) {
+ // We'll assume that boost's generator is 'good enough', and won't
+ // do full statistical checking here. Let's just call it the xkcd
+ // test (http://xkcd.com/221/), and check if three consecutive
+ // generates are not all the same.
+ isc::dns::qid_t one, two, three;
+ asiolink::QidGenerator& gen = asiolink::QidGenerator::getInstance();
+ one = gen.generateQid();
+ two = gen.generateQid();
+ three = gen.generateQid();
+ ASSERT_FALSE((one == two) && (one == three));
+}
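For context, the generator's intended use is to stamp outgoing queries with unpredictable QIDs; a usage sketch built only from the calls exercised above (the wrapper function itself is hypothetical):

    #include <asiolink/qid_gen.h>
    #include <dns/message.h>

    // Usage sketch (not part of the tests): give an outgoing query a
    // freshly generated random QID.
    void setRandomQid(isc::dns::Message& query) {
        query.setQid(asiolink::QidGenerator::getInstance().generateQid());
    }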
diff --git a/src/lib/asiolink/tests/recursive_query_unittest_2.cc b/src/lib/asiolink/tests/recursive_query_unittest_2.cc
new file mode 100644
index 0000000..ce51bbf
--- /dev/null
+++ b/src/lib/asiolink/tests/recursive_query_unittest_2.cc
@@ -0,0 +1,642 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <cstdlib>
+#include <iomanip>
+#include <iostream>
+#include <string>
+
+#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+
+
+#include <asio.hpp>
+
+#include <dns/buffer.h>
+#include <dns/question.h>
+#include <dns/message.h>
+#include <dns/messagerenderer.h>
+#include <dns/opcode.h>
+#include <dns/name.h>
+#include <dns/rcode.h>
+#include <dns/rrtype.h>
+#include <dns/rrset.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+#include <asiolink/asiolink_utilities.h>
+#include <asiolink/dns_service.h>
+#include <asiolink/io_address.h>
+#include <asiolink/io_endpoint.h>
+#include <asiolink/io_fetch.h>
+#include <asiolink/io_service.h>
+#include <asiolink/recursive_query.h>
+#include <resolve/resolver_interface.h>
+
+using namespace asio;
+using namespace asio::ip;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
+using namespace isc::resolve;
+using namespace std;
+
+/// RecursiveQuery Test - 2
+///
+/// The second part of the RecursiveQuery unit tests, this attempts to get the
+/// RecursiveQuery object to follow a set of referrals for "www.example.org" to
+/// and to invoke TCP fallback on one of the queries. In particular, we expect
+/// that the test will do the following in an attempt to resolve
+/// www.example.org:
+///
+/// - Send question over UDP to "root" - get referral to "org".
+/// - Send question over UDP to "org" - get referral to "example.org" with TC bit set.
+/// - Send question over TCP to "org" - get referral to "example.org".
+/// - Send question over UDP to "example.org" - get response for www.example.org.
+///
+/// (The order of queries is set in this way in order to also test that after a
+/// failover to TCP, queries revert to UDP).
+///
+/// By using the "test_server_" element of RecursiveQuery, all queries are
+/// directed to one or other of the "servers" in the RecursiveQueryTest2 class,
+/// regardless of the glue returned in referrals.
+
+namespace asiolink {
+
+const std::string TEST_ADDRESS = "127.0.0.1"; ///< Servers are on this address
+const uint16_t TEST_PORT = 5301; ///< ... and this port
+const size_t BUFFER_SIZE = 1024; ///< For all buffers
+const char* WWW_EXAMPLE_ORG = "192.0.2.254"; ///< Address of www.example.org
+
+// As the test is fairly long and complex, debugging "print" statements have
+// been left in although they are disabled. Set the following to "true" to
+// enable them.
+const bool DEBUG_PRINT = false;
+
+/// \brief Test fixture for the RecursiveQuery Test
+class RecursiveQueryTest2 : public virtual ::testing::Test
+{
+public:
+
+ /// \brief Status of query
+ ///
+ /// Set before the query and then by each "server" when responding.
+ enum QueryStatus {
+ NONE = 0, ///< Default
+ UDP_ROOT = 1, ///< Query root server over UDP
+ UDP_ORG = 2, ///< Query ORG server over UDP
+ TCP_ORG = 3, ///< Query ORG server over TCP
+ UDP_EXAMPLE_ORG = 4, ///< Query EXAMPLE.ORG server over UDP
+ COMPLETE = 5 ///< Query is complete
+ };
+
+ // Common stuff
+ bool debug_; ///< Set true for debug print
+ IOService service_; ///< Service to run everything
+ DNSService dns_service_; ///< Resolver is part of "server"
+ QuestionPtr question_; ///< What to ask
+ QueryStatus last_; ///< What was the last state
+ QueryStatus expected_; ///< Expected next state
+ OutputBufferPtr question_buffer_; ///< Question we expect to receive
+
+ // Data for TCP Server
+ size_t tcp_cumulative_; ///< Cumulative TCP data received
+ tcp::endpoint tcp_endpoint_; ///< Endpoint for TCP receives
+ size_t tcp_length_; ///< Expected length value
+ uint8_t tcp_receive_buffer_[BUFFER_SIZE]; ///< Receive buffer for TCP I/O
+ OutputBufferPtr tcp_send_buffer_; ///< Send buffer for TCP I/O
+ tcp::socket tcp_socket_; ///< Socket used by TCP server
+
+ /// Data for UDP
+ udp::endpoint udp_remote_; ///< Endpoint for UDP receives
+ size_t udp_length_; ///< Expected length value
+ uint8_t udp_receive_buffer_[BUFFER_SIZE]; ///< Receive buffer for UDP I/O
+ OutputBufferPtr udp_send_buffer_; ///< Send buffer for UDP I/O
+ udp::socket udp_socket_; ///< Socket used by UDP server
+
+ /// \brief Constructor
+ RecursiveQueryTest2() :
+ debug_(DEBUG_PRINT),
+ service_(),
+ dns_service_(service_, NULL, NULL, NULL),
+ question_(new Question(Name("www.example.org"), RRClass::IN(), RRType::A())),
+ last_(NONE),
+ expected_(NONE),
+ question_buffer_(new OutputBuffer(BUFFER_SIZE)),
+ tcp_cumulative_(0),
+ tcp_endpoint_(asio::ip::address::from_string(TEST_ADDRESS), TEST_PORT),
+ tcp_length_(0),
+ tcp_receive_buffer_(),
+ tcp_send_buffer_(new OutputBuffer(BUFFER_SIZE)),
+ tcp_socket_(service_.get_io_service()),
+ udp_remote_(),
+ udp_length_(0),
+ udp_receive_buffer_(),
+ udp_send_buffer_(new OutputBuffer(BUFFER_SIZE)),
+ udp_socket_(service_.get_io_service(), udp::v4())
+ {}
+
+ /// \brief Set Common Message Bits
+ ///
+ /// Sets up the common bits of a response message returned by the handlers.
+ ///
+ /// \param msg Message buffer in RENDER mode.
+ /// \param qid QID to set in the message
+ void setCommonMessage(isc::dns::Message& msg, uint16_t qid = 0) {
+ msg.setQid(qid);
+ msg.setHeaderFlag(Message::HEADERFLAG_QR);
+ msg.setOpcode(Opcode::QUERY());
+ msg.setHeaderFlag(Message::HEADERFLAG_AA);
+ msg.setRcode(Rcode::NOERROR());
+ msg.addQuestion(*question_);
+ }
+
+ /// \brief Set Referral to "org"
+ ///
+ /// Sets up the passed-in message (expected to be in "RENDER" mode) to
+ /// indicate a referral to fictitious .org nameservers.
+ ///
+ /// \param msg Message to update with referral information.
+ void setReferralOrg(isc::dns::Message& msg) {
+ if (debug_) {
+ cout << "setReferralOrg(): creating referral to .org nameservers" << endl;
+ }
+
+ // Do a referral to org. We'll define all NS records as "in-zone"
+ // nameservers (and supply glue) to avoid the possibility of the
+ // resolver starting another recursive query to resolve the address of
+ // a nameserver.
+ RRsetPtr org_ns(new RRset(Name("org."), RRClass::IN(), RRType::NS(), RRTTL(300)));
+ org_ns->addRdata(createRdata(RRType::NS(), RRClass::IN(), "ns1.org."));
+ org_ns->addRdata(createRdata(RRType::NS(), RRClass::IN(), "ns2.org."));
+ msg.addRRset(Message::SECTION_AUTHORITY, org_ns);
+
+ RRsetPtr org_ns1(new RRset(Name("ns1.org."), RRClass::IN(), RRType::A(), RRTTL(300)));
+ org_ns1->addRdata(createRdata(RRType::A(), RRClass::IN(), "192.0.2.1"));
+ msg.addRRset(Message::SECTION_ADDITIONAL, org_ns1);
+
+ RRsetPtr org_ns2(new RRset(Name("ns2.org."), RRClass::IN(), RRType::A(), RRTTL(300)));
+ org_ns2->addRdata(createRdata(RRType::A(), RRClass::IN(), "192.0.2.2"));
+ msg.addRRset(Message::SECTION_ADDITIONAL, org_ns2);
+ }
+
+ /// \brief Set Referral to "example.org"
+ ///
+ /// Sets up the passed-in message (expected to be in "RENDER" mode) to
+ /// indicate a referral to fictitious example.org nameservers.
+ ///
+ /// \param msg Message to update with referral information.
+ void setReferralExampleOrg(isc::dns::Message& msg) {
+ if (debug_) {
+ cout << "setReferralExampleOrg(): creating referral to example.org nameservers" << endl;
+ }
+
+ // Do a referral to example.org. As before, we'll define all NS
+ // records as "in-zone" nameservers (and supply glue) to avoid the
+ // possibility of the resolver starting another recursive query to look
+ // up the address of the nameserver.
+ RRsetPtr example_org_ns(new RRset(Name("example.org."), RRClass::IN(), RRType::NS(), RRTTL(300)));
+ example_org_ns->addRdata(createRdata(RRType::NS(), RRClass::IN(), "ns1.example.org."));
+ example_org_ns->addRdata(createRdata(RRType::NS(), RRClass::IN(), "ns2.example.org."));
+ msg.addRRset(Message::SECTION_AUTHORITY, example_org_ns);
+
+ RRsetPtr example_org_ns1(new RRset(Name("ns1.example.org."), RRClass::IN(), RRType::A(), RRTTL(300)));
+ example_org_ns1->addRdata(createRdata(RRType::A(), RRClass::IN(), "192.0.2.11"));
+ msg.addRRset(Message::SECTION_ADDITIONAL, example_org_ns1);
+
+ RRsetPtr example_org_ns2(new RRset(Name("ns2.example.org."), RRClass::IN(), RRType::A(), RRTTL(300)));
+ example_org_ns2->addRdata(createRdata(RRType::A(), RRClass::IN(), "192.0.2.21"));
+ msg.addRRset(Message::SECTION_ADDITIONAL, example_org_ns2);
+ }
+
+ /// \brief Set Answer to "www.example.org"
+ ///
+ /// Sets up the passed-in message (expected to be in "RENDER" mode) to
+ /// indicate an authoritative answer to www.example.org.
+ ///
+ /// \param msg Message to update with answer information.
+ void setAnswerWwwExampleOrg(isc::dns::Message& msg) {
+ if (debug_) {
+ cout << "setAnswerWwwExampleOrg(): creating answer for www.example.org" << endl;
+ }
+
+ // Give a response for www.example.org.
+ RRsetPtr www_example_org_a(new RRset(Name("www.example.org."), RRClass::IN(), RRType::A(), RRTTL(300)));
+ www_example_org_a->addRdata(createRdata(RRType::A(), RRClass::IN(), WWW_EXAMPLE_ORG));
+ msg.addRRset(Message::SECTION_ANSWER, www_example_org_a);
+
+ // ... and add the Authority and Additional sections. (These are the
+ // same as in the referral to example.org from the .org nameserver.)
+ setReferralExampleOrg(msg);
+ }
+
+ /// \brief UDP Receive Handler
+ ///
+ /// This is invoked when a message is received over UDP from the
+ /// RecursiveQuery object under test. It formats an answer and sends it
+ /// asynchronously, with the UdpSendHandler method being specified as the
+ /// completion handler.
+ ///
+ /// \param ec ASIO error code, completion code of asynchronous I/O issued
+ /// by the "server" to receive data.
+ /// \param length Amount of data received.
+ void udpReceiveHandler(error_code ec = error_code(), size_t length = 0) {
+ if (debug_) {
+ cout << "udpReceiveHandler(): error = " << ec.value() <<
+ ", length = " << length << ", last state = " << last_ <<
+ ", expected state = " << expected_ << endl;
+ }
+
+ // Expected state should be one greater than the last state.
+ EXPECT_EQ(static_cast<int>(expected_), static_cast<int>(last_) + 1);
+ last_ = expected_;
+
+ // The QID in the incoming data is random so set it to 0 for the
+ // data comparison check. (It is set to 0 in the buffer containing
+ // the expected data.)
+ uint16_t qid = readUint16(udp_receive_buffer_);
+ udp_receive_buffer_[0] = udp_receive_buffer_[1] = 0;
+
+ // Check that question we received is what was expected.
+ checkReceivedPacket(udp_receive_buffer_, length);
+
+ // The message returned depends on what state we are in. Set up
+ // common stuff first: bits not mentioned are set to 0.
+ Message msg(Message::RENDER);
+ setCommonMessage(msg, qid);
+
+ // Set up state-dependent bits:
+ switch (expected_) {
+ case UDP_ROOT:
+ // Return a referral to org. We then expect to query the "org"
+ // nameservers over UDP next.
+ setReferralOrg(msg);
+ expected_ = UDP_ORG;
+ break;
+
+ case UDP_ORG:
+ // Return a referral to example.org. We explicitly set the TC bit to
+ // force a repeat query to the .org nameservers over TCP.
+ setReferralExampleOrg(msg);
+ if (debug_) {
+ cout << "udpReceiveHandler(): setting TC bit" << endl;
+ }
+ msg.setHeaderFlag(Message::HEADERFLAG_TC);
+ expected_ = TCP_ORG;
+ break;
+
+ case UDP_EXAMPLE_ORG:
+ // Return the answer to the question.
+ setAnswerWwwExampleOrg(msg);
+ expected_ = COMPLETE;
+ break;
+
+ default:
+ FAIL() << "UdpReceiveHandler called with unknown state";
+ }
+
+ // Convert to wire format
+ udp_send_buffer_->clear();
+ MessageRenderer renderer(*udp_send_buffer_);
+ msg.toWire(renderer);
+
+ // Return a message back to the IOFetch object (after setting the
+ // expected length of data for the check in the send handler).
+ udp_length_ = udp_send_buffer_->getLength();
+ udp_socket_.async_send_to(asio::buffer(udp_send_buffer_->getData(),
+ udp_send_buffer_->getLength()),
+ udp_remote_,
+ boost::bind(&RecursiveQueryTest2::udpSendHandler,
+ this, _1, _2));
+ }
+
+ /// \brief UDP Send Handler
+ ///
+ /// Called when a send operation of the UDP server (i.e. a response
+ /// being sent to the RecursiveQuery) has completed, this re-issues
+ /// a read call.
+ ///
+ /// \param ec Completion error code of the send.
+ /// \param length Actual number of bytes sent.
+ void udpSendHandler(error_code ec = error_code(), size_t length = 0) {
+ if (debug_) {
+ cout << "udpSendHandler(): error = " << ec.value() <<
+ ", length = " << length << endl;
+ }
+
+ // Check send was OK
+ EXPECT_EQ(0, ec.value());
+ EXPECT_EQ(udp_length_, length);
+
+ // Reissue the receive call to await the next message.
+ udp_socket_.async_receive_from(
+ asio::buffer(udp_receive_buffer_, sizeof(udp_receive_buffer_)),
+ udp_remote_,
+ boost::bind(&RecursiveQueryTest2::udpReceiveHandler, this, _1, _2));
+ }
+
+ /// \brief Completion Handler for Accepting TCP Data
+ ///
+ /// Called when the remote system connects to the "TCP server". It issues
+ /// an asynchronous read on the socket to read data.
+ ///
+ /// \param ec Boost error code, value should be zero.
+ /// \param length Unused (present so the method matches the ASIO handler signature).
+ void tcpAcceptHandler(error_code ec = error_code(), size_t length = 0) {
+ if (debug_) {
+ cout << "tcpAcceptHandler(): error = " << ec.value() <<
+ ", length = " << length << endl;
+ }
+
+ // Expect that the accept completed without a problem.
+ EXPECT_EQ(0, ec.value());
+
+ // Initiate a read on the socket, indicating that nothing has yet been
+ // received.
+ tcp_cumulative_ = 0;
+ tcp_socket_.async_receive(
+ asio::buffer(tcp_receive_buffer_, sizeof(tcp_receive_buffer_)),
+ boost::bind(&RecursiveQueryTest2::tcpReceiveHandler, this, _1, _2));
+ }
+
+ /// \brief Completion Handler for Receiving TCP Data
+ ///
+ /// Reads data from the RecursiveQuery object and loops, reissuing reads,
+ /// until all the message has been read. It then returns an appropriate
+ /// response.
+ ///
+ /// \param ec ASIO error code, completion code of asynchronous I/O issued
+ /// by the "server" to receive data.
+ /// \param length Amount of data received.
+ void tcpReceiveHandler(error_code ec = error_code(), size_t length = 0) {
+ if (debug_) {
+ cout << "tcpReceiveHandler(): error = " << ec.value() <<
+ ", length = " << length <<
+ ", cumulative = " << tcp_cumulative_ << endl;
+ }
+
+ // Expect that the receive completed without a problem.
+ EXPECT_EQ(0, ec.value());
+
+ // Have we received all the data? We know this by checking if the two-
+ // byte length count in the message is equal to the data received.
+ tcp_cumulative_ += length;
+ bool complete = false;
+ if (tcp_cumulative_ > 2) {
+ uint16_t dns_length = readUint16(tcp_receive_buffer_);
+ complete = ((dns_length + 2) == tcp_cumulative_);
+ }
+
+ if (!complete) {
+ if (debug_) {
+ cout << "tcpReceiveHandler(): read not complete, " <<
+ "issuing another read" << endl;
+ }
+
+ // Not complete yet, issue another read.
+ tcp_socket_.async_receive(
+ asio::buffer(tcp_receive_buffer_ + tcp_cumulative_,
+ sizeof(tcp_receive_buffer_) - tcp_cumulative_),
+ boost::bind(&RecursiveQueryTest2::tcpReceiveHandler, this, _1, _2));
+ return;
+ }
+
+ // Have received a TCP message. Expected state should be one greater
+ // than the last state.
+ EXPECT_EQ(static_cast<int>(expected_), static_cast<int>(last_) + 1);
+ last_ = expected_;
+
+ // Check that question we received is what was expected. Note that we
+ // have to ignore the two-byte header in order to parse the message.
+ checkReceivedPacket(tcp_receive_buffer_ + 2, tcp_cumulative_ - 2);
+
+ // Return a message back. This is a referral to example.org, which
+ // should result in another query over UDP. Note the setting of the
+ // QID in the returned message with what was in the received message.
+ Message msg(Message::RENDER);
+ setCommonMessage(msg, readUint16(tcp_receive_buffer_ + 2));
+ setReferralExampleOrg(msg);
+
+ // Convert to wire format
+ tcp_send_buffer_->clear();
+ MessageRenderer renderer(*tcp_send_buffer_);
+ msg.toWire(renderer);
+
+ // Expected next state (when checked) is the UDP query to example.org.
+ // Also, take this opportunity to clear the accumulated read count in
+ // readiness for the next read. (If any - at present, there is only
+ // one read in the test, although extensions to this test suite could
+ // change that.)
+ expected_ = UDP_EXAMPLE_ORG;
+ tcp_cumulative_ = 0;
+
+ // We'll write the message in two parts, the count and the message
+ // itself. This saves having to prepend the count onto the start of a
+ // buffer. When specifying the send handler, the expected size of the
+ // data written is passed as the first parameter so that the handler
+ // can check it.
+ uint8_t count[2];
+ writeUint16(tcp_send_buffer_->getLength(), count);
+ tcp_socket_.async_send(asio::buffer(count, 2),
+ boost::bind(&RecursiveQueryTest2::tcpSendHandler, this,
+ 2, _1, _2));
+ tcp_socket_.async_send(asio::buffer(tcp_send_buffer_->getData(),
+ tcp_send_buffer_->getLength()),
+ boost::bind(&RecursiveQueryTest2::tcpSendHandler, this,
+ tcp_send_buffer_->getLength(), _1, _2));
+ }
+
+ /// \brief Completion Handler for Sending TCP data
+ ///
+ /// Called when the asynchronous send of data back to the RecursiveQuery
+ /// by the TCP "server" in this class has completed. (This send has to
+ /// be asynchronous because control needs to return to the caller in order
+ /// for the IOService "run()" method to be called to run the handlers.)
+ ///
+ /// \param expected_length Number of bytes that were expected to have been sent.
+ /// \param ec Boost error code, value should be zero.
+ /// \param length Number of bytes sent.
+ void tcpSendHandler(size_t expected_length = 0, error_code ec = error_code(),
+ size_t length = 0)
+ {
+ if (debug_) {
+ cout << "tcpSendHandler(): error = " << ec.value() <<
+ ", length = " << length <<
+ ", (expected length = " << expected_length << ")" << endl;
+ }
+ EXPECT_EQ(0, ec.value()); // Expect no error
+ EXPECT_EQ(expected_length, length); // And that amount sent is as expected
+ }
+
+ /// \brief Check Received Packet
+ ///
+ /// Checks the packet received from the RecursiveQuery object to ensure
+ /// that the question is what is expected.
+ ///
+ /// \param data Start of data. This is the start of the received buffer in
+ /// the case of UDP data, and an offset into the buffer past the
+ /// count field for TCP data.
+ /// \param length Length of data.
+ void checkReceivedPacket(uint8_t* data, size_t length) {
+
+ // Decode the received buffer.
+ InputBuffer buffer(data, length);
+ Message message(Message::PARSE);
+ message.fromWire(buffer);
+
+ // Check the packet.
+ EXPECT_FALSE(message.getHeaderFlag(Message::HEADERFLAG_QR));
+
+ Question question = **(message.beginQuestion());
+ EXPECT_TRUE(question == *question_);
+ }
+};
+
+/// \brief Resolver Callback Object
+///
+/// Holds the success and failure callback methods for the resolver
+class ResolverCallback : public isc::resolve::ResolverInterface::Callback {
+public:
+ /// \brief Constructor
+ ResolverCallback(IOService& service) :
+ service_(service), run_(false), status_(false), debug_(DEBUG_PRINT)
+ {}
+
+ /// \brief Destructor
+ virtual ~ResolverCallback()
+ {}
+
+ /// \brief Resolver Callback Success
+ ///
+ /// Called if the resolver detects that the call has succeeded.
+ ///
+ /// \param response Answer to the question.
+ virtual void success(const isc::dns::MessagePtr response) {
+ if (debug_) {
+ cout << "ResolverCallback::success(): answer received" << endl;
+ }
+
+ // There should be one RR each in the question and answer sections, and
+ // two RRs in each of the authority and additional sections.
+ EXPECT_EQ(1, response->getRRCount(Message::SECTION_QUESTION));
+ EXPECT_EQ(1, response->getRRCount(Message::SECTION_ANSWER));
+ EXPECT_EQ(2, response->getRRCount(Message::SECTION_AUTHORITY));
+ EXPECT_EQ(2, response->getRRCount(Message::SECTION_ADDITIONAL));
+
+ // Check the answer - that the RRset is there...
+ EXPECT_TRUE(response->hasRRset(Message::SECTION_ANSWER,
+ RRsetPtr(new RRset(Name("www.example.org."),
+ RRClass::IN(),
+ RRType::A(),
+ RRTTL(300)))));
+ const RRsetIterator rrset_i = response->beginSection(Message::SECTION_ANSWER);
+
+ // ... get iterator into the Rdata of this RRset and point to first
+ // element...
+ const RdataIteratorPtr rdata_i = (*rrset_i)->getRdataIterator();
+ rdata_i->first();
+
+ // ... and check it is what we expect.
+ EXPECT_EQ(string(WWW_EXAMPLE_ORG), rdata_i->getCurrent().toText());
+
+ // Flag completion
+ run_ = true;
+ status_ = true;
+
+ service_.stop(); // Cause run() to exit.
+ }
+
+ /// \brief Resolver Failure Completion
+ ///
+ /// Called if the resolver detects that the resolution has failed.
+ virtual void failure() {
+ if (debug_) {
+ cout << "ResolverCallback::success(): resolution failure" << endl;
+ }
+ ADD_FAILURE() << "Resolver reported completion failure";
+
+ // Flag completion
+ run_ = true;
+ status_ = false;
+
+ service_.stop(); // Cause run() to exit.
+ }
+
+ /// \brief Return status of "run" flag
+ bool getRun() const {
+ return (run_);
+ }
+
+ /// \brief Return "status" flag
+ bool getStatus() const {
+ return (status_);
+ }
+
+private:
+ IOService& service_; ///< Service handling the run queue
+ bool run_; ///< Set true when completion handler run
+ bool status_; ///< Set true for success, false on error
+ bool debug_; ///< Debug flag
+};
+
+// Sets up the UDP and TCP "servers", then tries a resolution.
+
+TEST_F(RecursiveQueryTest2, Resolve) {
+
+ // Set up the UDP server and issue the first read. The endpoint from which
+ // the query is sent is put in udp_remote_ when the read completes, which
+ // is referenced in the callback as the place to which the response is sent.
+ udp_socket_.set_option(socket_base::reuse_address(true));
+ udp_socket_.bind(udp::endpoint(address::from_string(TEST_ADDRESS), TEST_PORT));
+ udp_socket_.async_receive_from(asio::buffer(udp_receive_buffer_,
+ sizeof(udp_receive_buffer_)),
+ udp_remote_,
+ boost::bind(&RecursiveQueryTest2::udpReceiveHandler,
+ this, _1, _2));
+
+ // Set up the TCP server and issue the accept. Acceptance will cause the
+ // read to be issued.
+ tcp::acceptor acceptor(service_.get_io_service(),
+ tcp::endpoint(tcp::v4(), TEST_PORT));
+ acceptor.async_accept(tcp_socket_,
+ boost::bind(&RecursiveQueryTest2::tcpAcceptHandler,
+ this, _1, 0));
+
+ // Set up the RecursiveQuery object.
+ std::vector<std::pair<std::string, uint16_t> > upstream; // Empty
+ std::vector<std::pair<std::string, uint16_t> > upstream_root; // Empty
+ RecursiveQuery query(dns_service_, upstream, upstream_root);
+ query.setTestServer(TEST_ADDRESS, TEST_PORT);
+
+ // Set up the callback to receive notification that the query has
+ // completed.
+ ResolverInterface::CallbackPtr
+ resolver_callback(new ResolverCallback(service_));
+
+ // Kick off the resolution process. We expect the first question to go to
+ // "root".
+ expected_ = UDP_ROOT;
+ query.resolve(question_, resolver_callback);
+ service_.run();
+
+ // Check what ran. (We have to cast the callback to ResolverCallback as we
+ // lost the information on the derived class when we used a
+ // ResolverInterface::CallbackPtr to store a pointer to it.)
+ ResolverCallback* rc = static_cast<ResolverCallback*>(resolver_callback.get());
+ EXPECT_TRUE(rc->getRun());
+ EXPECT_TRUE(rc->getStatus());
+}
+
+} // namespace asiolink
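
The TCP legs of this test rely on standard DNS-over-TCP framing: each message is preceded by a two-byte length field, so a read is complete once the number of bytes received equals that length plus two. A minimal sketch of the completeness check used above (the function name is illustrative only; readUint16() is the asiolink_utilities.h helper used in the test and is assumed to accept a pointer to the raw buffer):

    #include <cstddef>
    #include <stdint.h>
    #include <asiolink/asiolink_utilities.h>   // readUint16()

    // Illustrative only: true once a length-prefixed DNS/TCP message has been
    // fully accumulated in 'buffer' (cumulative = total bytes received so far).
    bool tcpMessageComplete(const uint8_t* buffer, size_t cumulative) {
        if (cumulative < 2) {
            return (false);                 // Length field not yet complete
        }
        const uint16_t dns_length = asiolink::readUint16(buffer);
        return (cumulative == static_cast<size_t>(dns_length) + 2);
    }
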
diff --git a/src/lib/asiolink/tests/run_unittests.cc b/src/lib/asiolink/tests/run_unittests.cc
index b481784..c285f9e 100644
--- a/src/lib/asiolink/tests/run_unittests.cc
+++ b/src/lib/asiolink/tests/run_unittests.cc
@@ -14,13 +14,15 @@
#include <gtest/gtest.h>
+#include <log/root_logger_name.h>
#include <dns/tests/unittest_util.h>
int
main(int argc, char* argv[])
{
- ::testing::InitGoogleTest(&argc, argv);
- isc::UnitTestUtil::addDataPath(TEST_DATA_DIR);
+ ::testing::InitGoogleTest(&argc, argv); // Initialize Google test
+ isc::log::setRootLoggerName("unittest"); // Set a root logger name
+ isc::UnitTestUtil::addDataPath(TEST_DATA_DIR); // Add location of test data
return (RUN_ALL_TESTS());
}
diff --git a/src/lib/asiolink/tests/tcp_endpoint_unittest.cc b/src/lib/asiolink/tests/tcp_endpoint_unittest.cc
new file mode 100644
index 0000000..3787e1c
--- /dev/null
+++ b/src/lib/asiolink/tests/tcp_endpoint_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <asio.hpp>
+#include <asiolink/io_address.h>
+#include <asiolink/tcp_endpoint.h>
+
+using namespace asiolink;
+using namespace std;
+
+// This test checks that the endpoint can manage its own internal
+// asio::ip::tcp::endpoint object.
+
+TEST(TCPEndpointTest, v4Address) {
+ const string test_address("192.0.2.1");
+ const unsigned short test_port = 5301;
+
+ IOAddress address(test_address);
+ TCPEndpoint endpoint(address, test_port);
+
+ EXPECT_TRUE(address == endpoint.getAddress());
+ EXPECT_EQ(test_port, endpoint.getPort());
+ EXPECT_EQ(IPPROTO_TCP, endpoint.getProtocol());
+ EXPECT_EQ(AF_INET, endpoint.getFamily());
+}
+
+TEST(TCPEndpointTest, v6Address) {
+ const string test_address("2001:db8::1235");
+ const unsigned short test_port = 5302;
+
+ IOAddress address(test_address);
+ TCPEndpoint endpoint(address, test_port);
+
+ EXPECT_TRUE(address == endpoint.getAddress());
+ EXPECT_EQ(test_port, endpoint.getPort());
+ EXPECT_EQ(IPPROTO_TCP, endpoint.getProtocol());
+ EXPECT_EQ(AF_INET6, endpoint.getFamily());
+}
diff --git a/src/lib/asiolink/tests/tcp_socket_unittest.cc b/src/lib/asiolink/tests/tcp_socket_unittest.cc
new file mode 100644
index 0000000..f0a45ee
--- /dev/null
+++ b/src/lib/asiolink/tests/tcp_socket_unittest.cc
@@ -0,0 +1,515 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+/// \brief Test of TCPSocket
+///
+/// Tests the functionality of a TCPSocket by working through an open-send-
+/// receive-close sequence and checking that the asynchronous notifications
+/// work.
+
+#include <string>
+
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstddef>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <boost/bind.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <dns/buffer.h>
+
+#include <asio.hpp>
+
+#include <asiolink/asiolink_utilities.h>
+#include <asiolink/io_service.h>
+#include <asiolink/tcp_endpoint.h>
+#include <asiolink/tcp_socket.h>
+
+using namespace asio;
+using namespace asio::ip;
+using namespace asiolink;
+using namespace isc::dns;
+using namespace std;
+
+namespace {
+
+const char SERVER_ADDRESS[] = "127.0.0.1";
+const unsigned short SERVER_PORT = 5303;
+
+// TODO: Shouldn't we send something that is a real message?
+const char OUTBOUND_DATA[] = "Data sent from client to server";
+const char INBOUND_DATA[] = "Returned data from server to client";
+}
+
+/// An instance of this object is passed to the asynchronous I/O functions
+/// and the operator() method is called when an asynchronous I/O completes.
+/// The arguments to the completion callback are stored for later retrieval.
+class TCPCallback {
+public:
+ /// \brief Operations the server is doing
+ enum Operation {
+ ACCEPT = 0, ///< accept() was issued
+ OPEN = 1, ///< Client connected to server
+ READ = 2, ///< Asynchronous read completed
+ WRITE = 3, ///< Asynchronous write completed
+ NONE = 4 ///< "Not set" state
+ };
+
+ /// \brief Minimum size of buffers
+ enum {
+ MIN_SIZE = (64 * 1024 + 2) ///< 64kB + two bytes for a count
+ };
+
+ struct PrivateData {
+ PrivateData() :
+ error_code_(), length_(0), cumulative_(0), expected_(0), offset_(0),
+ name_(""), queued_(NONE), called_(NONE)
+ {}
+
+ asio::error_code error_code_; ///< Completion error code
+ size_t length_; ///< Bytes transferred in this I/O
+ size_t cumulative_; ///< Cumulative bytes transferred
+ size_t expected_; ///< Expected amount of data
+ size_t offset_; ///< Where to put data in buffer
+ std::string name_; ///< Which of the objects this is
+ Operation queued_; ///< Queued operation
+ Operation called_; ///< Which callback called
+ uint8_t data_[MIN_SIZE]; ///< Receive buffer
+
+ };
+
+ /// \brief Constructor
+ ///
+ /// Constructs the object. It also creates the data member pointed to by
+ /// a shared pointer. When used as a callback object, this is copied as it
+ /// is passed into the asynchronous function. This means that there are two
+ /// objects and inspecting the one we passed in does not tell us anything.
+ ///
+ /// Therefore we use a boost::shared_ptr. When the object is copied, the
+ /// shared pointer is copied, which leaves both objects pointing to the same
+ /// data.
+ ///
+ /// \param which Which of the two callback objects this is
+ TCPCallback(std::string which) : ptr_(new PrivateData())
+ {
+ ptr_->name_ = which;
+ }
+
+ /// \brief Destructor
+ ///
+ /// No code needed, destroying the shared pointer destroys the private data.
+ virtual ~TCPCallback()
+ {}
+
+ /// \brief Callback Function
+ ///
+ /// Called when an asynchronous operation completes, this stores the error
+ /// code and length, and records the completed operation in the called_ member.
+ ///
+ /// \param ec I/O completion error code passed to callback function.
+ /// \param length Number of bytes transferred
+ void operator()(asio::error_code ec = asio::error_code(),
+ size_t length = 0)
+ {
+ setCode(ec.value());
+ ptr_->called_ = ptr_->queued_;
+ ptr_->length_ = length;
+ }
+
+ /// \brief Get I/O completion error code
+ int getCode() {
+ return (ptr_->error_code_.value());
+ }
+
+ /// \brief Set I/O completion code
+ ///
+ /// \param code New value of completion code
+ void setCode(int code) {
+ ptr_->error_code_ = asio::error_code(code, asio::error_code().category());
+ }
+
+ /// \brief Get number of bytes transferred in I/O
+ size_t& length() {
+ return (ptr_->length_);
+ }
+
+ /// \brief Get cumulative number of bytes transferred in I/O
+ size_t& cumulative() {
+ return (ptr_->cumulative_);
+ }
+
+ /// \brief Get expected amount of data
+ size_t& expected() {
+ return (ptr_->expected_);
+ }
+
+ /// \brief Get offset into data
+ size_t& offset() {
+ return (ptr_->offset_);
+ }
+
+ /// \brief Get data member
+ uint8_t* data() {
+ return (ptr_->data_);
+ }
+
+ /// \brief Get flag to say what was queued
+ Operation& queued() {
+ return (ptr_->queued_);
+ }
+
+ /// \brief Get flag to say when callback was called
+ Operation& called() {
+ return (ptr_->called_);
+ }
+
+ /// \brief Return instance of callback name
+ std::string& name() {
+ return (ptr_->name_);
+ }
+
+private:
+ boost::shared_ptr<PrivateData> ptr_; ///< Pointer to private data
+};
+
+
+// Read Server Data
+//
+// Called in the part of the test that has the client send a message to the
+// server, this loops until all the data has been read (synchronously) by the
+// server.
+//
+// "All the data read" means that the server has received a message that is
+// preceded by a two-byte count field and that the total amount of data received
+// from the remote end is equal to the value in the count field plus two bytes
+// for the count field itself.
+//
+// \param socket Socket on which the server is reading data
+// \param server_cb Structure in which server data is held.
+void
+serverRead(tcp::socket& socket, TCPCallback& server_cb) {
+
+ // As we may need to read multiple times, keep a count of the cumulative
+ // amount of data read and do successive reads into the appropriate part
+ // of the buffer.
+ //
+ // Note that there are no checks for buffer overflow - this is a test
+ // program and we have sized the buffer to be large enough for the test.
+ server_cb.cumulative() = 0;
+
+ bool complete = false;
+ while (!complete) {
+
+ // Read block of data and update cumulative amount of data received.
+ server_cb.length() = socket.receive(
+ asio::buffer(server_cb.data() + server_cb.cumulative(),
+ TCPCallback::MIN_SIZE - server_cb.cumulative()));
+ server_cb.cumulative() += server_cb.length();
+
+ // If we have read at least two bytes, we can work out how much we
+ // should be reading.
+ if (server_cb.cumulative() >= 2) {
+ server_cb.expected() = readUint16(server_cb.data());
+ if ((server_cb.expected() + 2) == server_cb.cumulative()) {
+
+ // Amount of data read from socket equals the size of the
+ // message (as indicated in the first two bytes of the message)
+ // plus the size of the count field. Therefore we have received
+ // all the data.
+ complete = true;
+ }
+ }
+ }
+}
+
+// The processReceivedData() method should return true only if the count in the
+// first two bytes is equal to the size of the rest of the buffer.
+
+TEST(TCPSocket, processReceivedData) {
+ const uint16_t PACKET_SIZE = 16382; // Amount of "real" data in the buffer
+
+ IOService service; // Used to instantiate socket
+ TCPSocket<TCPCallback> test(service); // Socket under test
+ uint8_t inbuff[PACKET_SIZE + 2]; // Buffer to check
+ OutputBufferPtr outbuff(new OutputBuffer(16));
+ // Where data is put
+ size_t expected; // Expected amount of data
+ size_t offset; // Where to put next data
+ size_t cumulative; // Cumulative data received
+
+ // Set some dummy values in the buffer to check
+ for (size_t i = 0; i < sizeof(inbuff); ++i) {
+ inbuff[i] = i % 256;
+ }
+
+ // Check that the method will handle various receive sizes.
+ writeUint16(PACKET_SIZE, inbuff);
+
+ cumulative = 0;
+ offset = 0;
+ expected = 0;
+ outbuff->clear();
+ bool complete = test.processReceivedData(inbuff, 1, cumulative, offset,
+ expected, outbuff);
+ EXPECT_FALSE(complete);
+ EXPECT_EQ(1, cumulative);
+ EXPECT_EQ(1, offset);
+ EXPECT_EQ(0, expected);
+ EXPECT_EQ(0, outbuff->getLength());
+
+ // Now pretend that we've received one more byte.
+ complete = test.processReceivedData(inbuff, 1, cumulative, offset, expected,
+ outbuff);
+ EXPECT_FALSE(complete);
+ EXPECT_EQ(2, cumulative);
+ EXPECT_EQ(0, offset);
+ EXPECT_EQ(PACKET_SIZE, expected);
+ EXPECT_EQ(0, outbuff->getLength());
+
+ // Add another two bytes. However, this time note that we have to offset
+ // in the input buffer because it is expected that the next chunk of data
+ // from the connection will be read into the start of the buffer.
+ complete = test.processReceivedData(inbuff + cumulative, 2, cumulative,
+ offset, expected, outbuff);
+ EXPECT_FALSE(complete);
+ EXPECT_EQ(4, cumulative);
+ EXPECT_EQ(0, offset);
+ EXPECT_EQ(PACKET_SIZE, expected);
+ EXPECT_EQ(2, outbuff->getLength());
+
+ const uint8_t* dataptr = static_cast<const uint8_t*>(outbuff->getData());
+ EXPECT_TRUE(equal(inbuff + 2, inbuff + cumulative, dataptr));
+
+ // And add the remaining data. Remember that "inbuff" is "PACKET_SIZE + 2"
+ // long.
+ complete = test.processReceivedData(inbuff + cumulative,
+ PACKET_SIZE + 2 - cumulative,
+ cumulative, offset, expected, outbuff);
+ EXPECT_TRUE(complete);
+ EXPECT_EQ(PACKET_SIZE + 2, cumulative);
+ EXPECT_EQ(0, offset);
+ EXPECT_EQ(PACKET_SIZE, expected);
+ EXPECT_EQ(PACKET_SIZE, outbuff->getLength());
+ dataptr = static_cast<const uint8_t*>(outbuff->getData());
+ EXPECT_TRUE(equal(inbuff + 2, inbuff + cumulative, dataptr));
+}
+
+// TODO: Need to add a test to check the cancel() method
+
+// Tests the operation of a TCPSocket by opening it, sending an asynchronous
+// message to a server, receiving an asynchronous message from the server and
+// closing.
+TEST(TCPSocket, SequenceTest) {
+
+ // Common objects.
+ IOService service; // Service object for async control
+
+ // The client - the TCPSocket being tested
+ TCPSocket<TCPCallback> client(service);// Socket under test
+ TCPCallback client_cb("Client"); // Async I/O callback function
+ TCPEndpoint client_remote_endpoint; // Where client receives message from
+ OutputBufferPtr client_buffer(new OutputBuffer(128));
+ // Received data is put here
+
+ // The server - with which the client communicates.
+ IOAddress server_address(SERVER_ADDRESS);
+ // Address of target server
+ TCPCallback server_cb("Server"); // Server callback
+ TCPEndpoint server_endpoint(server_address, SERVER_PORT);
+ // Endpoint describing server
+ TCPEndpoint server_remote_endpoint; // Address where server received message from
+ tcp::socket server_socket(service.get_io_service());
+ // Socket used for server
+
+ // Step 1. Create the connection between the client and the server. Set
+ // up the server to accept incoming connections and have the client open
+ // a channel to it.
+
+ // Set up server - open socket and queue an accept.
+ server_cb.queued() = TCPCallback::ACCEPT;
+ server_cb.called() = TCPCallback::NONE;
+ server_cb.setCode(42); // Some error
+ tcp::acceptor acceptor(service.get_io_service(),
+ tcp::endpoint(tcp::v4(), SERVER_PORT));
+ acceptor.set_option(tcp::acceptor::reuse_address(true));
+ acceptor.async_accept(server_socket, server_cb);
+
+ // Set up client - connect to the server.
+ client_cb.queued() = TCPCallback::OPEN;
+ client_cb.called() = TCPCallback::NONE;
+ client_cb.setCode(43); // Some error
+ EXPECT_FALSE(client.isOpenSynchronous());
+ client.open(&server_endpoint, client_cb);
+
+ // Run the open and the accept callback and check that they ran.
+ service.run_one();
+ service.run_one();
+
+ EXPECT_EQ(TCPCallback::ACCEPT, server_cb.called());
+ EXPECT_EQ(0, server_cb.getCode());
+
+ EXPECT_EQ(TCPCallback::OPEN, client_cb.called());
+ EXPECT_EQ(0, client_cb.getCode());
+
+ // Step 2. Get the client to write to the server asynchronously. The
+ // server will loop reading the data synchronously.
+
+ // Write asynchronously to the server.
+ client_cb.called() = TCPCallback::NONE;
+ client_cb.queued() = TCPCallback::WRITE;
+ client_cb.setCode(143); // Arbitrary number
+ client_cb.length() = 0;
+ client.asyncSend(OUTBOUND_DATA, sizeof(OUTBOUND_DATA), &server_endpoint, client_cb);
+
+ // Wait for the client callback to complete. (Must do this first on
+ // Solaris: if we do the synchronous read first, the test hangs.)
+ service.run_one();
+
+ // Synchronously read the data from the server.
+ serverRead(server_socket, server_cb);
+
+ // Check the client state
+ EXPECT_EQ(TCPCallback::WRITE, client_cb.called());
+ EXPECT_EQ(0, client_cb.getCode());
+ EXPECT_EQ(sizeof(OUTBOUND_DATA) + 2, client_cb.length());
+
+ // ... and check what the server received.
+ EXPECT_EQ(sizeof(OUTBOUND_DATA) + 2, server_cb.cumulative());
+ EXPECT_TRUE(equal(OUTBOUND_DATA,
+ (OUTBOUND_DATA + (sizeof(OUTBOUND_DATA) - 1)),
+ (server_cb.data() + 2)));
+
+ // Step 3. Get the server to write all the data asynchronously and have the
+ // client loop (asynchronously) reading the data. Note that we copy the
+ // data into the server's internal buffer in order to precede it with a two-
+ // byte count field.
+
+ // Have the server write asynchronously to the client.
+ server_cb.called() = TCPCallback::NONE;
+ server_cb.queued() = TCPCallback::WRITE;
+ server_cb.length() = 0;
+ server_cb.cumulative() = 0;
+
+ writeUint16(sizeof(INBOUND_DATA), server_cb.data());
+ copy(INBOUND_DATA, (INBOUND_DATA + sizeof(INBOUND_DATA) - 1),
+ (server_cb.data() + 2));
+ server_socket.async_send(asio::buffer(server_cb.data(),
+ (sizeof(INBOUND_DATA) + 2)),
+ server_cb);
+
+ // Have the client read asynchronously.
+ client_cb.called() = TCPCallback::NONE;
+ client_cb.queued() = TCPCallback::READ;
+ client_cb.length() = 0;
+ client_cb.cumulative() = 0;
+ client_cb.expected() = 0;
+ client_cb.offset() = 0;
+
+ client.asyncReceive(client_cb.data(), TCPCallback::MIN_SIZE,
+ client_cb.offset(), &client_remote_endpoint,
+ client_cb);
+
+ // Run the callbacks. Several options are possible depending on how ASIO
+ // is implemented and whether the message gets fragmented:
+ //
+ // 1) The send handler may complete immediately, regardless of whether the
+ // data has been read by the client. (This is the most likely.)
+ // 2) The send handler may only run after all the data has been read by
+ // the client. (This could happen if the client's TCP buffers were too
+ // small so the data was not transferred to the "remote" system until the
+ // remote buffer has been emptied one or more times.)
+ // 3) The client handler may be run a number of times to handle the message
+ // fragments and the server handler may run between calls of the client
+ // handler.
+ //
+ // So loop, running one handler at a time until we are certain that all the
+ // handlers have run.
+
+ bool server_complete = false;
+ bool client_complete = false;
+ while (!server_complete || !client_complete) {
+ service.run_one();
+
+ // Has the server run?
+ if (!server_complete) {
+ if (server_cb.called() == server_cb.queued()) {
+
+ // Yes. Check that the send completed successfully and that
+ // all the data that was expected to have been sent was in fact
+ // sent.
+ EXPECT_EQ(0, server_cb.getCode());
+ EXPECT_EQ((sizeof(INBOUND_DATA) + 2), server_cb.length());
+ server_complete = true;
+ continue;
+ }
+ }
+
+ if (!client_complete) {
+
+ // Client callback must have run. Check that it ran OK.
+ EXPECT_EQ(TCPCallback::READ, client_cb.called());
+ EXPECT_EQ(0, client_cb.getCode());
+
+ // Check if we need to queue another read, copying the data into
+ // the output buffer as we do so.
+ client_complete = client.processReceivedData(client_cb.data(),
+ client_cb.length(),
+ client_cb.cumulative(),
+ client_cb.offset(),
+ client_cb.expected(),
+ client_buffer);
+
+ // If the data is not complete, queue another read.
+ if (! client_complete) {
+ client_cb.called() = TCPCallback::NONE;
+ client_cb.queued() = TCPCallback::READ;
+ client_cb.length() = 0;
+ client.asyncReceive(client_cb.data(), TCPCallback::MIN_SIZE,
+ client_cb.offset(), &client_remote_endpoint,
+ client_cb);
+ }
+ }
+ }
+
+ // Both the send and the receive have completed. Check that the received
+ // is what was sent.
+
+ // Check the client state
+ EXPECT_EQ(TCPCallback::READ, client_cb.called());
+ EXPECT_EQ(0, client_cb.getCode());
+ EXPECT_EQ(sizeof(INBOUND_DATA) + 2, client_cb.cumulative());
+ EXPECT_EQ(sizeof(INBOUND_DATA), client_buffer->getLength());
+
+ // ... and check what the server sent.
+ EXPECT_EQ(TCPCallback::WRITE, server_cb.called());
+ EXPECT_EQ(0, server_cb.getCode());
+ EXPECT_EQ(sizeof(INBOUND_DATA) + 2, server_cb.length());
+
+ // ... and that what was sent is what was received.
+ const uint8_t* received = static_cast<const uint8_t*>(client_buffer->getData());
+ EXPECT_TRUE(equal(INBOUND_DATA, (INBOUND_DATA + (sizeof(INBOUND_DATA) - 1)),
+ received));
+
+ // Close client and server.
+ EXPECT_NO_THROW(client.close());
+ EXPECT_NO_THROW(server_socket.close());
+}
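
The TCPCallback class above keeps its state behind a boost::shared_ptr because ASIO copies handler objects when an operation is queued; the copy and the original must observe the same data for the test to inspect results afterwards. The pattern reduces to something like the following sketch (class and member names are invented for illustration):

    #include <cstddef>
    #include <boost/shared_ptr.hpp>
    #include <asio.hpp>

    class SharedStateHandler {
    public:
        struct State {
            State() : ec_(), length_(0) {}
            asio::error_code ec_;   // Completion code of the last operation
            size_t length_;         // Bytes transferred by the last operation
        };

        SharedStateHandler() : state_(new State()) {}

        // ASIO copies the handler when the operation is queued; both copies
        // share the same State object, so results remain visible here.
        void operator()(asio::error_code ec, size_t length = 0) {
            state_->ec_ = ec;
            state_->length_ = length;
        }

        const State& state() const { return (*state_); }

    private:
        boost::shared_ptr<State> state_;
    };
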
diff --git a/src/lib/asiolink/tests/udp_socket_unittest.cc b/src/lib/asiolink/tests/udp_socket_unittest.cc
index bb79b88..8563d22 100644
--- a/src/lib/asiolink/tests/udp_socket_unittest.cc
+++ b/src/lib/asiolink/tests/udp_socket_unittest.cc
@@ -12,21 +12,6 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-
/// \brief Test of UDPSocket
///
/// Tests the fuctionality of a UDPSocket by working through an open-send-
@@ -50,14 +35,18 @@
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
+#include <dns/buffer.h>
+
#include <asio.hpp>
+#include <asiolink/asiolink_utilities.h>
#include <asiolink/io_service.h>
#include <asiolink/udp_endpoint.h>
#include <asiolink/udp_socket.h>
using namespace asio;
using namespace asiolink;
+using namespace isc::dns;
using namespace std;
namespace {
@@ -177,6 +166,49 @@ private:
boost::shared_ptr<PrivateData> ptr_; ///< Pointer to private data
};
+// The processReceivedData() method should return true regardless of what is in
+// the first two bytes of the buffer.
+
+TEST(UDPSocket, processReceivedData) {
+ IOService service; // Used to instantiate socket
+ UDPSocket<UDPCallback> test(service); // Socket under test
+ uint8_t inbuff[32]; // Buffer to check
+ OutputBufferPtr outbuff(new OutputBuffer(16));
+ // Where data is put
+ size_t expected; // Expected amount of data
+ size_t offset; // Where to put next data
+ size_t cumulative; // Cumulative data received
+
+ // Set some dummy values in the buffer to check
+ for (uint8_t i = 0; i < sizeof(inbuff); ++i) {
+ inbuff[i] = i;
+ }
+
+ // Expect that the value is true whatever number is written in the first
+ // two bytes of the buffer.
+ uint16_t count = 0;
+ for (uint32_t i = 0; i < (2 << 16); ++i, ++count) {
+ writeUint16(count, inbuff);
+
+ // Set some random values
+ cumulative = 5;
+ offset = 10;
+ expected = 15;
+ outbuff->clear();
+
+ bool completed = test.processReceivedData(inbuff, sizeof(inbuff),
+ cumulative, offset, expected,
+ outbuff);
+ EXPECT_TRUE(completed);
+ EXPECT_EQ(sizeof(inbuff), cumulative);
+ EXPECT_EQ(0, offset);
+ EXPECT_EQ(sizeof(inbuff), expected);
+
+ const uint8_t* dataptr = static_cast<const uint8_t*>(outbuff->getData());
+ EXPECT_TRUE(equal(inbuff, inbuff + sizeof(inbuff) - 1, dataptr));
+ }
+}
+
// TODO: Need to add a test to check the cancel() method
// Tests the operation of a UDPSocket by opening it, sending an asynchronous
@@ -199,6 +231,10 @@ TEST(UDPSocket, SequenceTest) {
UDPCallback client_cb("Client"); // Async I/O callback function
UDPEndpoint client_remote_endpoint; // Where client receives message from
size_t client_cumulative = 0; // Cumulative data received
+ size_t client_offset = 0; // Offset into buffer where data is put
+ size_t client_expected = 0; // Expected amount of data
+ OutputBufferPtr client_buffer(new OutputBuffer(16));
+ // Where data is put
// The server - with which the client communicates. For convenience, we
// use the same io_service, and use the endpoint object created for
@@ -208,11 +244,12 @@ TEST(UDPSocket, SequenceTest) {
server.set_option(socket_base::reuse_address(true));
// Assertion to ensure that the server buffer is large enough
- char data[UDPSocket<UDPCallback>::MAX_SIZE];
+ char data[UDPSocket<UDPCallback>::MIN_SIZE];
ASSERT_GT(sizeof(data), sizeof(OUTBOUND_DATA));
// Open the client socket - the operation should be synchronous
- EXPECT_FALSE(client.open(&server_endpoint, client_cb));
+ EXPECT_TRUE(client.isOpenSynchronous());
+ client.open(&server_endpoint, client_cb);
// Issue read on the server. Completion callback should not have run.
server_cb.setCalled(false);
@@ -257,7 +294,7 @@ TEST(UDPSocket, SequenceTest) {
server.async_send_to(buffer(INBOUND_DATA, sizeof(INBOUND_DATA)),
server_remote_endpoint.getASIOEndpoint(), server_cb);
- // Expect the two callbacks to run
+ // Expect two callbacks to run.
service.run_one();
service.run_one();
@@ -276,10 +313,19 @@ TEST(UDPSocket, SequenceTest) {
EXPECT_TRUE(server_address == client_remote_endpoint.getAddress());
EXPECT_EQ(SERVER_PORT, client_remote_endpoint.getPort());
- // Finally, check that the receive received a complete buffer's worth of data.
- EXPECT_TRUE(client.receiveComplete(&data[0], client_cb.getLength(),
- client_cumulative));
+ // Check that the client received a complete buffer's worth of data.
+ EXPECT_TRUE(client.processReceivedData(&data[0], client_cb.getLength(),
+ client_cumulative, client_offset,
+ client_expected, client_buffer));
+
EXPECT_EQ(client_cb.getLength(), client_cumulative);
+ EXPECT_EQ(0, client_offset);
+ EXPECT_EQ(client_cb.getLength(), client_expected);
+ EXPECT_EQ(client_cb.getLength(), client_buffer->getLength());
+
+ // ...and check that the data was copied to the output client buffer.
+ const char* client_char_data = static_cast<const char*>(client_buffer->getData());
+ EXPECT_TRUE(equal(&data[0], &data[client_cb.getLength() - 1], client_char_data));
// Close client and server.
EXPECT_NO_THROW(client.close());
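
For reference, the behaviour the UDP processReceivedData() test added above expects — every datagram is a complete message, so the staging data is simply copied to the output buffer — can be sketched as a free-standing function. This is a sketch consistent with the old receiveComplete() code and the test's expectations, not necessarily the committed implementation; OutputBuffer::writeData() is assumed to be available.

    #include <cstddef>
    #include <dns/buffer.h>

    // Sketch of the UDP completion rule: a datagram always carries a whole
    // message, so copy it out and report the receive as complete.
    bool udpProcessReceivedData(const void* staging, size_t length,
                                size_t& cumulative, size_t& offset,
                                size_t& expected,
                                isc::dns::OutputBufferPtr& outbuff)
    {
        cumulative = length;    // All data for this message arrived at once
        expected = length;      // ... so this is also the expected total
        offset = 0;             // Any next read starts at the buffer start
        outbuff->writeData(staging, length);
        return (true);          // A UDP receive is always complete
    }
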
diff --git a/src/lib/asiolink/udp_endpoint.h b/src/lib/asiolink/udp_endpoint.h
index 0958af6..99dc27f 100644
--- a/src/lib/asiolink/udp_endpoint.h
+++ b/src/lib/asiolink/udp_endpoint.h
@@ -64,6 +64,17 @@ public:
asio_endpoint_placeholder_(NULL), asio_endpoint_(asio_endpoint)
{}
+ /// \brief Constructor from an ASIO UDP endpoint.
+ ///
+ /// This constructor is designed to be an efficient wrapper for the
+ /// corresponding ASIO class, \c udp::endpoint.
+ ///
+ /// \param asio_endpoint The ASIO representation of the UDP endpoint.
+ UDPEndpoint(const asio::ip::udp::endpoint& asio_endpoint) :
+ asio_endpoint_placeholder_(new asio::ip::udp::endpoint(asio_endpoint)),
+ asio_endpoint_(*asio_endpoint_placeholder_)
+ {}
+
/// \brief The destructor.
virtual ~UDPEndpoint() { delete asio_endpoint_placeholder_; }
//@}
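
A short usage sketch for the new wrapping constructor: turning the asio endpoint filled in by a completed async_receive_from() call into a UDPEndpoint so the caller can use the generic accessors. (The logSender() name is invented; getAddress() returning an IOAddress and getPort() follow how the endpoint classes are used in the tests above.)

    #include <asio.hpp>
    #include <asiolink/io_address.h>
    #include <asiolink/udp_endpoint.h>

    // 'sender' has been filled in by a completed async_receive_from() call.
    void logSender(const asio::ip::udp::endpoint& sender) {
        asiolink::UDPEndpoint endpoint(sender);            // new constructor
        const asiolink::IOAddress address = endpoint.getAddress();
        const unsigned short port = endpoint.getPort();
        (void)address;     // e.g. hand these to logging or statistics code
        (void)port;
    }
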
diff --git a/src/lib/asiolink/udp_server.cc b/src/lib/asiolink/udp_server.cc
index 841bdc6..063926e 100644
--- a/src/lib/asiolink/udp_server.cc
+++ b/src/lib/asiolink/udp_server.cc
@@ -15,6 +15,7 @@
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h> // for some IPC/network system calls
+#include <errno.h>
#include <boost/shared_array.hpp>
@@ -169,7 +170,7 @@ UDPServer::UDPServer(io_service& io_service, const ip::address& addr,
/// pattern; see internal/coroutine.h for details.
void
UDPServer::operator()(error_code ec, size_t length) {
- /// Because the coroutine reeentry block is implemented as
+ /// Because the coroutine reentry block is implemented as
/// a switch statement, inline variable declarations are not
/// permitted. Certain variables used below can be declared here.
@@ -195,6 +196,14 @@ UDPServer::operator()(error_code ec, size_t length) {
CORO_YIELD data_->socket_->async_receive_from(
buffer(data_->data_.get(), MAX_LENGTH), *data_->sender_,
*this);
+ // Abort on fatal errors
+ if (ec) {
+ using namespace asio::error;
+ if (ec.value() != would_block && ec.value() != try_again &&
+ ec.value() != interrupted) {
+ return;
+ }
+ }
} while (ec || length == 0);
data_->bytes_ = length;
@@ -257,8 +266,6 @@ UDPServer::operator()(error_code ec, size_t length) {
// this point.
CORO_YIELD data_->io_.post(AsyncLookup<UDPServer>(*this));
- dlog("[XX] got an answer");
-
// The 'done_' flag indicates whether we have an answer
// to send back. If not, exit the coroutine permanently.
if (!data_->done_) {
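
The retry logic added above treats would_block, try_again and interrupted as transient receive errors and everything else as fatal; the same classification can be expressed as a small predicate, sketched here with an invented name:

    #include <asio.hpp>

    // True if a receive error is fatal and the server loop should give up;
    // false if the operation can simply be retried.
    bool isFatalReceiveError(const asio::error_code& ec) {
        using namespace asio::error;
        return (ec &&
                ec.value() != would_block &&
                ec.value() != try_again &&
                ec.value() != interrupted);
    }
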
diff --git a/src/lib/asiolink/udp_socket.h b/src/lib/asiolink/udp_socket.h
index bb94ad5..35fc7b1 100644
--- a/src/lib/asiolink/udp_socket.h
+++ b/src/lib/asiolink/udp_socket.h
@@ -28,7 +28,6 @@
#include <config.h>
-
#include <asiolink/io_asio_socket.h>
#include <asiolink/io_endpoint.h>
#include <asiolink/io_service.h>
@@ -49,20 +48,20 @@ private:
public:
enum {
- MAX_SIZE = 4096 // Send and receive size
+ MIN_SIZE = 4096 // Minimum send and receive size
};
-
+
/// \brief Constructor from an ASIO UDP socket.
///
- /// \param socket The ASIO representation of the UDP socket. It
- /// is assumed that the caller will open and close the socket, so
- /// these operations are a no-op for that socket.
+ /// \param socket The ASIO representation of the UDP socket. It is assumed
+ /// that the caller will open and close the socket, so these
+ /// operations are a no-op for that socket.
UDPSocket(asio::ip::udp::socket& socket);
/// \brief Constructor
///
/// Used when the UDPSocket is being asked to manage its own internal
- /// socket. It is assumed that open() and close() will not be used.
+ /// socket. In this case, the open() and close() methods are used.
///
/// \param service I/O Service object used to manage the socket.
UDPSocket(IOService& service);
@@ -70,68 +69,79 @@ public:
/// \brief Destructor
virtual ~UDPSocket();
- virtual int getNative() const { return (socket_.native()); }
- virtual int getProtocol() const { return (IPPROTO_UDP); }
+ /// \brief Return file descriptor of underlying socket
+ virtual int getNative() const {
+ return (socket_.native());
+ }
- /// \brief Open Socket
+ /// \brief Return protocol of socket
+ virtual int getProtocol() const {
+ return (IPPROTO_UDP);
+ }
+
+ /// \brief Is "open()" synchronous?
///
- /// Opens the UDP socket. In the model for transport-layer agnostic I/O,
- /// an "open" operation includes a connection to the remote end (which
- /// may take time). This does not happen for UDP, so the method returns
- /// "false" to indicate that the operation completed synchronously.
+ /// Indicates that the opening of a UDP socket is synchronous.
+ virtual bool isOpenSynchronous() const {
+ return true;
+ }
+
+ /// \brief Open Socket
///
- /// \param endpoint Endpoint to which the socket will connect to.
- /// \param callback Unused.
+ /// Opens the UDP socket. This is a synchronous operation.
///
- /// \return false to indicate that the "operation" completed synchronously.
- virtual bool open(const IOEndpoint* endpoint, C&);
+ /// \param endpoint Endpoint to which the socket will send data. This is
+ /// used to determine the address family that should be used for the
+ /// underlying socket.
+ /// \param callback Unused as the operation is synchronous.
+ virtual void open(const IOEndpoint* endpoint, C& callback);
/// \brief Send Asynchronously
///
- /// This corresponds to async_send_to() for UDP sockets and async_send()
- /// for TCP. In both cases an endpoint argument is supplied indicating the
- /// target of the send - this is ignored for TCP.
+ /// Calls the underlying socket's async_send_to() method to send a packet of
+ /// data asynchronously to the remote endpoint. The callback will be called
+ /// on completion.
///
/// \param data Data to send
/// \param length Length of data to send
/// \param endpoint Target of the send
/// \param callback Callback object.
virtual void asyncSend(const void* data, size_t length,
- const IOEndpoint* endpoint, C& callback);
+ const IOEndpoint* endpoint, C& callback);
/// \brief Receive Asynchronously
///
- /// This correstponds to async_receive_from() for UDP sockets and
- /// async_receive() for TCP. In both cases, an endpoint argument is
- /// supplied to receive the source of the communication. For TCP it will
- /// be filled in with details of the connection.
+ /// Calls the underlying socket's async_receive_from() method to read a
+ /// packet of data from a remote endpoint. Arrival of the data is signalled
+ /// via a call to the callback function.
///
/// \param data Buffer to receive incoming message
/// \param length Length of the data buffer
- /// \param cumulative Amount of data that should already be in the buffer.
- /// (This is ignored - every UPD receive fills the buffer from the start.)
+ /// \param offset Offset into buffer where data is to be put
/// \param endpoint Source of the communication
/// \param callback Callback object
- virtual void asyncReceive(void* data, size_t length, size_t cumulative,
- IOEndpoint* endpoint, C& callback);
+ virtual void asyncReceive(void* data, size_t length, size_t offset,
+ IOEndpoint* endpoint, C& callback);
- /// \brief Checks if the data received is complete.
+ /// \brief Process received data
///
- /// As all the data is received in one I/O, so this is, this is effectively
- /// a no-op (although it does update the amount of data received).
+ /// See the description of IOAsioSocket::processReceivedData for a complete
+ /// description of this method.
///
- /// \param data Data buffer containing data to date. (This is ignored
- /// for UDP receives.)
- /// \param length Amount of data received in last asynchronous I/O
- /// \param cumulative On input, amount of data received before the last
- /// I/O. On output, the total amount of data received to date.
+ /// \param staging Pointer to the start of the staging buffer.
+ /// \param length Amount of data in the staging buffer.
+ /// \param cumulative Amount of data received before the staging buffer is
+ /// processed.
+ /// \param offset Unused.
+ /// \param expected Unused.
+ /// \param outbuff Output buffer. Data in the staging buffer is copied
+ /// to this output buffer in the call.
///
- /// \return true if the receive is complete, false if another receive is
- /// needed.
- virtual bool receiveComplete(void*, size_t length, size_t& cumulative) {
- cumulative = length;
- return (true);
- }
+ /// \return Always true
+ virtual bool processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff);
/// \brief Cancel I/O On Socket
virtual void cancel();
@@ -174,16 +184,16 @@ UDPSocket<C>::~UDPSocket()
delete socket_ptr_;
}
-// Open the socket. Throws an error on failure
-// TODO: Make the open more resilient
+// Open the socket.
-template <typename C> bool
+template <typename C> void
UDPSocket<C>::open(const IOEndpoint* endpoint, C&) {
- // Ignore opens on already-open socket. Don't throw a failure because
- // of uncertainties as to what precedes whan when using asynchronous I/O.
- // At also allows us a treat a passed-in socket as a self-managed socket.
-
+ // Ignore opens on already-open socket. (Don't throw a failure because
+ // of uncertainties as to what precedes what when using asynchronous I/O.)
+ // It also allows us to treat a passed-in socket in exactly the same way as
+ // a self-managed socket (in that we can call the open() and close() methods
+ // of this class).
if (!isopen_) {
if (endpoint->getFamily() == AF_INET) {
socket_.open(asio::ip::udp::v4());
@@ -193,14 +203,21 @@ UDPSocket<C>::open(const IOEndpoint* endpoint, C&) {
}
isopen_ = true;
- // Ensure it can send and receive 4K buffers.
- socket_.set_option(asio::socket_base::send_buffer_size(MAX_SIZE));
- socket_.set_option(asio::socket_base::receive_buffer_size(MAX_SIZE));
- ;
- // Allow reuse of an existing port/address
- socket_.set_option(asio::socket_base::reuse_address(true));
+ // Ensure it can send and receive at least 4K buffers.
+ asio::ip::udp::socket::send_buffer_size snd_size;
+ socket_.get_option(snd_size);
+ if (snd_size.value() < MIN_SIZE) {
+ snd_size = MIN_SIZE;
+ socket_.set_option(snd_size);
+ }
+
+ asio::ip::udp::socket::receive_buffer_size rcv_size;
+ socket_.get_option(rcv_size);
+ if (rcv_size.value() < MIN_SIZE) {
+ rcv_size = MIN_SIZE;
+ socket_.set_option(rcv_size);
+ }
}
- return (false);
}
// Send a message. Should never do this if the socket is not open, so throw
@@ -208,19 +225,20 @@ UDPSocket<C>::open(const IOEndpoint* endpoint, C&) {
template <typename C> void
UDPSocket<C>::asyncSend(const void* data, size_t length,
- const IOEndpoint* endpoint, C& callback)
+ const IOEndpoint* endpoint, C& callback)
{
if (isopen_) {
// Upconvert to a UDPEndpoint. We need to do this because although
// IOEndpoint is the base class of UDPEndpoint and TCPEndpoint, it
- // doing cont contain a method for getting at the underlying endpoint
- // type - those are in the derived class and the two classes differ on
+ // does not contain a method for getting at the underlying endpoint
+ // type - that is in the derived class and the two classes differ on
// return type.
-
assert(endpoint->getProtocol() == IPPROTO_UDP);
const UDPEndpoint* udp_endpoint =
static_cast<const UDPEndpoint*>(endpoint);
+
+ // ... and send the message.
socket_.async_send_to(asio::buffer(data, length),
udp_endpoint->getASIOEndpoint(), callback);
} else {
@@ -229,14 +247,12 @@ UDPSocket<C>::asyncSend(const void* data, size_t length,
}
}
-// Receive a message. Note that the "cumulative" argument is ignored - every UDP
-// receive is put into the buffer beginning at the start - there is no concept
-// receiving a subsequent part of a message. Same critera as before concerning
-// the need for the socket to be open.
+// Receive a message. Should never do this if the socket is not open, so throw
+// an exception if this is the case.
template <typename C> void
-UDPSocket<C>::asyncReceive(void* data, size_t length, size_t,
- IOEndpoint* endpoint, C& callback)
+UDPSocket<C>::asyncReceive(void* data, size_t length, size_t offset,
+ IOEndpoint* endpoint, C& callback)
{
if (isopen_) {
@@ -244,7 +260,15 @@ UDPSocket<C>::asyncReceive(void* data, size_t length, size_t,
assert(endpoint->getProtocol() == IPPROTO_UDP);
UDPEndpoint* udp_endpoint = static_cast<UDPEndpoint*>(endpoint);
- socket_.async_receive_from(asio::buffer(data, length),
+ // Ensure we can write into the buffer
+ if (offset >= length) {
+ isc_throw(BufferOverflow, "attempt to read into area beyond end of "
+ "UDP receive buffer");
+ }
+ void* buffer_start = static_cast<void*>(static_cast<uint8_t*>(data) + offset);
+
+ // Issue the read
+ socket_.async_receive_from(asio::buffer(buffer_start, length - offset),
udp_endpoint->getASIOEndpoint(), callback);
} else {
isc_throw(SocketNotOpen,
@@ -252,7 +276,29 @@ UDPSocket<C>::asyncReceive(void* data, size_t length, size_t,
}
}
+// Process the received data. Just copy it across to the output buffer
+// and update the arguments as appropriate.
+
+template <typename C> bool
+UDPSocket<C>::processReceivedData(const void* staging, size_t length,
+ size_t& cumulative, size_t& offset,
+ size_t& expected,
+ isc::dns::OutputBufferPtr& outbuff)
+{
+ // Set return values to what we should expect.
+ cumulative = length;
+ expected = length;
+ offset = 0;
+
+ // Copy data across
+ outbuff->writeData(staging, length);
+
+ // ... and mark that we have everything.
+ return (true);
+}
+
// Cancel I/O on the socket. No-op if the socket is not open.
+
template <typename C> void
UDPSocket<C>::cancel() {
if (isopen_) {
diff --git a/src/lib/cache/message_cache.cc b/src/lib/cache/message_cache.cc
index f1334a2..53c73c1 100644
--- a/src/lib/cache/message_cache.cc
+++ b/src/lib/cache/message_cache.cc
@@ -46,8 +46,16 @@ MessageCache::lookup(const isc::dns::Name& qname,
HashKey entry_key = HashKey(entry_name, RRClass(message_class_));
MessageEntryPtr msg_entry = message_table_.get(entry_key);
if(msg_entry) {
- message_lru_.touch(msg_entry);
- return (msg_entry->genMessage(time(NULL), response));
+ // Check whether the message entry has expired.
+ if (msg_entry->getExpireTime() > time(NULL)) {
+ message_lru_.touch(msg_entry);
+ return (msg_entry->genMessage(time(NULL), response));
+ } else {
+ // The message entry has expired; remove it from the hash table and lru list.
+ message_table_.remove(entry_key);
+ message_lru_.remove(msg_entry);
+ return (false);
+ }
}
return (false);
diff --git a/src/lib/cache/message_cache.h b/src/lib/cache/message_cache.h
index ef0d16d..65c7381 100644
--- a/src/lib/cache/message_cache.h
+++ b/src/lib/cache/message_cache.h
@@ -41,6 +41,9 @@ public:
MessageCache(boost::shared_ptr<RRsetCache> rrset_cache_,
uint32_t cache_size, uint16_t message_class);
+ /// \brief Destructor function
+ virtual ~MessageCache() {}
+
/// \brief Look up message in cache.
/// \param message generated response message if the message entry
/// can be found.
diff --git a/src/lib/cache/message_entry.cc b/src/lib/cache/message_entry.cc
index d4de11f..6396167 100644
--- a/src/lib/cache/message_entry.cc
+++ b/src/lib/cache/message_entry.cc
@@ -23,6 +23,34 @@
using namespace isc::dns;
using namespace std;
+// Put file scope functions in unnamed namespace.
+namespace {
+
+// Get the shortest existing ancestor which is the owner name of
+// one DNAME record for the given query name.
+// Note: there may be multiple DNAME records (a DNAME chain) in the answer
+// section. In most cases they are in order, but the code can't depend on
+// that; it has to find the starter by iterating over the DNAME chain.
+Name
+getDNAMEChainStarter(const Message& message, const Name& query_name) {
+ Name dname = query_name;
+ RRsetIterator rrset_iter = message.beginSection(Message::SECTION_ANSWER);
+ while(rrset_iter != message.endSection(Message::SECTION_ANSWER)) {
+ if ((*rrset_iter)->getType() == RRType::DNAME()) {
+ const Name& rrname = (*rrset_iter)->getName();
+ if (NameComparisonResult::SUBDOMAIN ==
+ dname.compare(rrname).getRelation()) {
+ dname = rrname;
+ }
+ }
+ ++rrset_iter;
+ }
+
+ return (dname);
+}
+
+} // End of unnamed namespace
+
namespace isc {
namespace cache {
@@ -48,7 +76,7 @@ MessageEntry::getRRsetEntries(vector<RRsetEntryPtr>& rrset_entry_vec,
for (int index = 0; index < entry_count; ++index) {
RRsetEntryPtr rrset_entry = rrset_cache_->lookup(rrsets_[index].name_,
rrsets_[index].type_);
- if (time_now < rrset_entry->getExpireTime()) {
+ if (rrset_entry && time_now < rrset_entry->getExpireTime()) {
rrset_entry_vec.push_back(rrset_entry);
} else {
return (false);
@@ -120,48 +148,45 @@ MessageEntry::getRRsetTrustLevel(const Message& message,
switch(section) {
case Message::SECTION_ANSWER: {
if (aa) {
- RRsetIterator rrset_iter = message.beginSection(section);
-
- // Make sure we are inspecting the right RRset
- while((*rrset_iter)->getName() != rrset->getName() &&
- (*rrset_iter)->getType() != rrset->getType() &&
- rrset_iter != message.endSection(section)) {
- ++rrset_iter;
- }
- assert(rrset_iter != message.endSection(section));
-
// According to RFC 2181 section 5.4.1, only the record
// describing that alias is necessarily authoritative.
- // If there is one or more CNAME records in answer section.
- // CNAME records is assumed as the first rrset.
- if ((*rrset_iter)->getType() == RRType::CNAME()) {
- // TODO: real equals for RRsets?
- if ((*rrset_iter).get() == rrset.get()) {
- return (RRSET_TRUST_ANSWER_AA);
- } else {
- return (RRSET_TRUST_ANSWER_NONAA);
+ // If there are CNAME records (not synthesized from a DNAME)
+ // in the answer section, only the CNAME record whose owner
+ // name is the same as the qname is assumed to be
+ // authoritative; all the remaining records are not.
+ //
+ // If there are DNAME records in the answer section,
+ // only the starting DNAME and the CNAME record synthesized
+ // from it are authoritative; any other records in the answer
+ // section are non-authoritative.
+ QuestionIterator quest_iter = message.beginQuestion();
+ // Make sure question section is not empty.
+ assert( quest_iter != message.endQuestion());
+
+ const Name& query_name = (*quest_iter)->getName();
+ const RRType& type = rrset->getType();
+ const Name& name = rrset->getName();
+ if ((type == RRType::CNAME() && name == query_name) ||
+ (type == RRType::DNAME() &&
+ name == getDNAMEChainStarter(message, query_name))) {
+ return (RRSET_TRUST_ANSWER_AA);
+ } else {
+ // If there is a CNAME record whose owner name is the same as
+ // the query name in answer section, the other records in answer
+ // section are non-authoritative, except the starter of DNAME
+ // chain (only checking CNAME is enough, because if the CNAME
+ // record is synthesized from a DNAME record, that DNAME
+ // record must be the starter of the DNAME chain).
+ RRsetIterator iter = message.beginSection(Message::SECTION_ANSWER);
+ while(iter != message.endSection(Message::SECTION_ANSWER)) {
+ if ((*iter)->getType() == RRType::CNAME() &&
+ (*iter)->getName() == query_name) {
+ return (RRSET_TRUST_ANSWER_NONAA);
+ }
+ ++iter;
}
}
-
- // Here, if the first rrset is DNAME, then assume the
- // second rrset is synchronized CNAME record, except
- // these two records, any other records in answer section
- // should be treated as non-authoritative.
- // TODO, this part logic should be revisited later,
- // since it's not mentioned by RFC2181.
- if ((*rrset_iter)->getType() == RRType::DNAME()) {
- // TODO: real equals for RRsets?
- if ((*rrset_iter).get() == rrset.get() ||
- ((++rrset_iter) != message.endSection(section) &&
- (*rrset_iter).get() == rrset.get())) {
- return (RRSET_TRUST_ANSWER_AA);
- } else {
- return (RRSET_TRUST_ANSWER_NONAA);
- }
- }
-
return (RRSET_TRUST_ANSWER_AA);
-
} else {
return (RRSET_TRUST_ANSWER_NONAA);
}
diff --git a/src/lib/cache/message_entry.h b/src/lib/cache/message_entry.h
index 682c171..67b0cf3 100644
--- a/src/lib/cache/message_entry.h
+++ b/src/lib/cache/message_entry.h
@@ -87,6 +87,12 @@ public:
return (*hash_key_ptr_);
}
+ /// \brief Get the expire time of the message entry.
+ /// \return The expire time of the message entry.
+ time_t getExpireTime() const {
+ return (expire_time_);
+ }
+
/// \short Protected members, so they can be accessed by tests.
//@{
protected:
diff --git a/src/lib/cache/rrset_cache.cc b/src/lib/cache/rrset_cache.cc
index 7dab3b5..f538320 100644
--- a/src/lib/cache/rrset_cache.cc
+++ b/src/lib/cache/rrset_cache.cc
@@ -42,45 +42,41 @@ RRsetCache::lookup(const isc::dns::Name& qname,
{
const string entry_name = genCacheEntryName(qname, qtype);
RRsetEntryPtr entry_ptr = rrset_table_.get(HashKey(entry_name, RRClass(class_)));
-
- //If the rrset entry has expired, return NULL.
- if(entry_ptr && (time(NULL) > entry_ptr->getExpireTime())) {
- return (RRsetEntryPtr());
+ if (entry_ptr) {
+ if (entry_ptr->getExpireTime() > time(NULL)) {
+ // Only touch the non-expired rrset entries
+ rrset_lru_.touch(entry_ptr);
+ return (entry_ptr);
+ } else {
+ // the rrset entry has expired, so just remove it from
+ // hash table and lru list.
+ rrset_table_.remove(entry_ptr->hashKey());
+ rrset_lru_.remove(entry_ptr);
+ }
}
- return (entry_ptr);
+
+ return (RRsetEntryPtr());
}
RRsetEntryPtr
RRsetCache::update(const isc::dns::RRset& rrset, const RRsetTrustLevel& level) {
// TODO: If the RRset is an NS, we should update the NSAS as well
-
// lookup first
RRsetEntryPtr entry_ptr = lookup(rrset.getName(), rrset.getType());
- if(!entry_ptr) {
- // rrset entry doesn't exist, create one rrset entry for the rrset
- // and add it directly.
- entry_ptr.reset(new RRsetEntry(rrset, level));
- // Replace the expired rrset entry if it exists.
- rrset_table_.add(entry_ptr, entry_ptr->hashKey(), true);
- //TODO , lru list touch.
- return (entry_ptr);
- } else {
- // there is one rrset entry in the cache, need to check whether
- // the new rrset is more authoritative.
+ if (entry_ptr) {
if (entry_ptr->getTrustLevel() > level) {
- // existed rrset entry is more authoritative, do nothing,
- // just return it.
- //TODO, lru list touch
+ // The existing rrset entry is more authoritative, so just return it
return (entry_ptr);
} else {
- HashKey key = entry_ptr->hashKey();
- entry_ptr.reset(new RRsetEntry(rrset, level));
- //TODO, lru list touch.
- // Replace the expired rrset entry if it exists.
- rrset_table_.add(entry_ptr, entry_ptr->hashKey(), true);
- return (entry_ptr);
+ // Remove the old rrset entry from the lru list.
+ rrset_lru_.remove(entry_ptr);
}
}
+
+ entry_ptr.reset(new RRsetEntry(rrset, level));
+ rrset_table_.add(entry_ptr, entry_ptr->hashKey(), true);
+ rrset_lru_.add(entry_ptr);
+ return (entry_ptr);
}
#if 0
diff --git a/src/lib/cache/rrset_cache.h b/src/lib/cache/rrset_cache.h
index 9453f85..5bf2730 100644
--- a/src/lib/cache/rrset_cache.h
+++ b/src/lib/cache/rrset_cache.h
@@ -45,7 +45,7 @@ public:
/// \param cache_size the size of rrset cache.
/// \param rrset_class the class of rrset cache.
RRsetCache(uint32_t cache_size, uint16_t rrset_class);
- ~RRsetCache() {}
+ virtual ~RRsetCache() {}
//@}
/// \brief Look up rrset in cache.
@@ -92,7 +92,8 @@ public:
bool resize(uint32_t size);
#endif
-private:
+ /// \short Protected members, so they can be accessed by tests.
+protected:
uint16_t class_; // The class of the rrset cache.
isc::nsas::HashTable<RRsetEntry> rrset_table_;
isc::nsas::LruList<RRsetEntry> rrset_lru_;
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am
index b93c9a7..8c7b0af 100644
--- a/src/lib/cache/tests/Makefile.am
+++ b/src/lib/cache/tests/Makefile.am
@@ -64,3 +64,6 @@ EXTRA_DIST += testdata/message_fromWire3
EXTRA_DIST += testdata/message_fromWire4
EXTRA_DIST += testdata/message_fromWire5
EXTRA_DIST += testdata/message_fromWire6
+EXTRA_DIST += testdata/message_fromWire7
+EXTRA_DIST += testdata/message_fromWire8
+EXTRA_DIST += testdata/message_fromWire9
diff --git a/src/lib/cache/tests/message_cache_unittest.cc b/src/lib/cache/tests/message_cache_unittest.cc
index e7184bd..187216e 100644
--- a/src/lib/cache/tests/message_cache_unittest.cc
+++ b/src/lib/cache/tests/message_cache_unittest.cc
@@ -43,27 +43,57 @@ public:
}
};
+/// \brief Derived from base class to make it easy to test
+/// its internals.
+class DerivedRRsetCache: public RRsetCache {
+public:
+ DerivedRRsetCache(uint32_t cache_size, uint16_t rrset_class):
+ RRsetCache(cache_size, rrset_class)
+ {}
+
+ /// \brief Remove one rrset entry from rrset cache.
+ void removeRRsetEntry(Name& name, const RRType& type) {
+ const string entry_name = genCacheEntryName(name, type);
+ HashKey entry_key = HashKey(entry_name, RRClass(class_));
+ RRsetEntryPtr rrset_entry = rrset_table_.get(entry_key);
+ if (rrset_entry) {
+ rrset_lru_.remove(rrset_entry);
+ rrset_table_.remove(entry_key);
+ }
+ }
+};
+
class MessageCacheTest: public testing::Test {
public:
MessageCacheTest(): message_parse(Message::PARSE),
message_render(Message::RENDER)
{
uint16_t class_ = RRClass::IN().getCode();
- rrset_cache_.reset(new RRsetCache(RRSET_CACHE_DEFAULT_SIZE, class_));
- message_cache_.reset(new DerivedMessageCache(rrset_cache_,
- MESSAGE_CACHE_DEFAULT_SIZE, class_ ));
+ rrset_cache_.reset(new DerivedRRsetCache(RRSET_CACHE_DEFAULT_SIZE, class_));
+ // Set the message cache size to 1 to make the unit tests easier.
+ message_cache_.reset(new DerivedMessageCache(rrset_cache_, 1, class_ ));
}
protected:
boost::shared_ptr<DerivedMessageCache> message_cache_;
- RRsetCachePtr rrset_cache_;
+ boost::shared_ptr<DerivedRRsetCache> rrset_cache_;
Message message_parse;
Message message_render;
};
+void
+updateMessageCache(const char* message_file,
+ boost::shared_ptr<DerivedMessageCache> cache)
+{
+ Message msg(Message::PARSE);
+ messageFromFile(msg, message_file);
+ cache->update(msg);
+}
+
TEST_F(MessageCacheTest, testLookup) {
messageFromFile(message_parse, "message_fromWire1");
EXPECT_TRUE(message_cache_->update(message_parse));
+
Name qname("test.example.com.");
EXPECT_TRUE(message_cache_->lookup(qname, RRType::A(), message_render));
EXPECT_EQ(message_cache_->messages_count(), 1);
@@ -75,6 +105,20 @@ TEST_F(MessageCacheTest, testLookup) {
Name qname1("test.example.net.");
EXPECT_TRUE(message_cache_->lookup(qname1, RRType::A(), message_render));
+
+ // Test looking up a message whose rrsets have expired or have been
+ // removed from the rrset cache.
+ rrset_cache_->removeRRsetEntry(qname1, RRType::A());
+ EXPECT_FALSE(message_cache_->lookup(qname1, RRType::A(), message_render));
+
+ // Add a message entry that has already expired to the message cache.
+ updateMessageCache("message_fromWire9", message_cache_);
+ EXPECT_EQ(message_cache_->messages_count(), 3);
+ // The message entry has been added, but can't be looked up, since
+ // it has expired and is removed automatically when being looked up.
+ Name qname_org("test.example.org.");
+ EXPECT_FALSE(message_cache_->lookup(qname_org, RRType::A(), message_render));
+ EXPECT_EQ(message_cache_->messages_count(), 2);
}
TEST_F(MessageCacheTest, testUpdate) {
@@ -93,5 +137,22 @@ TEST_F(MessageCacheTest, testUpdate) {
EXPECT_TRUE(new_msg_render.getHeaderFlag(Message::HEADERFLAG_AA));
}
+TEST_F(MessageCacheTest, testCacheLruBehavior) {
+ // qname = "test.example.com.", qtype = A
+ updateMessageCache("message_fromWire1", message_cache_);
+ // qname = "test.example.net.", qtype = A
+ updateMessageCache("message_fromWire2", message_cache_);
+ // qname = "example.com.", qtype = SOA
+ updateMessageCache("message_fromWire4", message_cache_);
+
+ Name qname_net("test.example.net.");
+ EXPECT_TRUE(message_cache_->lookup(qname_net, RRType::A(), message_render));
+
+ // qname = "a.example.com.", qtype = A
+ updateMessageCache("message_fromWire5", message_cache_);
+ Name qname_com("test.example.com.");
+ EXPECT_FALSE(message_cache_->lookup(qname_com, RRType::A(), message_render));
+}
+
} // namespace
diff --git a/src/lib/cache/tests/message_entry_unittest.cc b/src/lib/cache/tests/message_entry_unittest.cc
index 3b2711a..a96c441 100644
--- a/src/lib/cache/tests/message_entry_unittest.cc
+++ b/src/lib/cache/tests/message_entry_unittest.cc
@@ -29,7 +29,7 @@ using namespace isc;
using namespace isc::dns;
using namespace std;
-static uint32_t MAX_UINT32 = numeric_limits<uint32_t>::max();
+static uint32_t MAX_UINT32 = numeric_limits<uint32_t>::max();
namespace {
@@ -74,7 +74,6 @@ public:
message_parse(Message::PARSE),
message_render(Message::RENDER)
{
-
rrset_cache_.reset(new RRsetCache(RRSET_CACHE_DEFAULT_SIZE, class_));
}
@@ -108,7 +107,6 @@ TEST_F(MessageEntryTest, testParseRRset) {
TEST_F(MessageEntryTest, testGetRRsetTrustLevel_AA) {
messageFromFile(message_parse, "message_fromWire3");
DerivedMessageEntry message_entry(message_parse, rrset_cache_);
-
RRsetIterator rrset_iter = message_parse.beginSection(Message::SECTION_ANSWER);
RRsetTrustLevel level = message_entry.getRRsetTrustLevelForTest(message_parse,
@@ -164,7 +162,54 @@ TEST_F(MessageEntryTest, testGetRRsetTrustLevel_CNAME) {
level = message_entry.getRRsetTrustLevelForTest(message_parse,
*rrset_iter,
Message::SECTION_ANSWER);
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_NONAA);
+}
+
+TEST_F(MessageEntryTest, testGetRRsetTrustLevel_CNAME_and_DNAME) {
+ messageFromFile(message_parse, "message_fromWire7");
+ DerivedMessageEntry message_entry(message_parse, rrset_cache_);
+ RRsetIterator rrset_iter = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetTrustLevel level = message_entry.getRRsetTrustLevelForTest(message_parse,
+ *rrset_iter,
+ Message::SECTION_ANSWER);
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_AA);
+ // All the remaining rrsets are non-authoritative
+ ++rrset_iter;
+ while (rrset_iter != message_parse.endSection(Message::SECTION_ANSWER)) {
+ level = message_entry.getRRsetTrustLevelForTest(message_parse,
+ *rrset_iter,
+ Message::SECTION_ANSWER);
+ ++rrset_iter;
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_NONAA);
+ }
+}
+
+TEST_F(MessageEntryTest, testGetRRsetTrustLevel_DNAME_and_CNAME) {
+ messageFromFile(message_parse, "message_fromWire8");
+ DerivedMessageEntry message_entry(message_parse, rrset_cache_);
+ RRsetIterator rrset_iter = message_parse.beginSection(Message::SECTION_ANSWER);
+ RRsetTrustLevel level = message_entry.getRRsetTrustLevelForTest(message_parse,
+ *rrset_iter,
+ Message::SECTION_ANSWER);
+ // Test the deepest DNAME
EXPECT_EQ(level, RRSET_TRUST_ANSWER_AA);
+ ++rrset_iter;
+ // Test the synthesized CNAME
+ level = message_entry.getRRsetTrustLevelForTest(message_parse,
+ *rrset_iter,
+ Message::SECTION_ANSWER);
+ ++rrset_iter;
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_AA);
+
+ ++rrset_iter;
+ // All the remaining rrsets are non-authoritative
+ while (rrset_iter != message_parse.endSection(Message::SECTION_ANSWER)) {
+ level = message_entry.getRRsetTrustLevelForTest(message_parse,
+ *rrset_iter,
+ Message::SECTION_ANSWER);
+ ++rrset_iter;
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_NONAA);
+ }
}
TEST_F(MessageEntryTest, testGetRRsetTrustLevel_DNAME) {
@@ -186,7 +231,7 @@ TEST_F(MessageEntryTest, testGetRRsetTrustLevel_DNAME) {
level = message_entry.getRRsetTrustLevelForTest(message_parse,
*rrset_iter,
Message::SECTION_ANSWER);
- EXPECT_EQ(level, RRSET_TRUST_ANSWER_AA);
+ EXPECT_EQ(level, RRSET_TRUST_ANSWER_NONAA);
}
// We only test the expire_time of the message entry.
@@ -204,8 +249,8 @@ TEST_F(MessageEntryTest, testGetRRsetEntries) {
messageFromFile(message_parse, "message_fromWire3");
DerivedMessageEntry message_entry(message_parse, rrset_cache_);
vector<RRsetEntryPtr> vec;
-
- // the time is bigger than the smallest expire time of
+
+ // the time is bigger than the smallest expire time of
// the rrset in message.
time_t expire_time = time(NULL) + 10802;
EXPECT_FALSE(message_entry.getRRsetEntriesForTest(vec, expire_time));
@@ -215,17 +260,17 @@ TEST_F(MessageEntryTest, testGenMessage) {
messageFromFile(message_parse, "message_fromWire3");
DerivedMessageEntry message_entry(message_parse, rrset_cache_);
time_t expire_time = message_entry.getExpireTime();
-
+
Message msg(Message::RENDER);
EXPECT_FALSE(message_entry.genMessage(expire_time + 2, msg));
message_entry.genMessage(time(NULL), msg);
// Check whether the generated message is same with cached one.
-
+
EXPECT_TRUE(msg.getHeaderFlag(Message::HEADERFLAG_AA));
EXPECT_FALSE(msg.getHeaderFlag(Message::HEADERFLAG_TC));
- EXPECT_EQ(1, sectionRRsetCount(msg, Message::SECTION_ANSWER));
- EXPECT_EQ(1, sectionRRsetCount(msg, Message::SECTION_AUTHORITY));
- EXPECT_EQ(5, sectionRRsetCount(msg, Message::SECTION_ADDITIONAL));
+ EXPECT_EQ(1, sectionRRsetCount(msg, Message::SECTION_ANSWER));
+ EXPECT_EQ(1, sectionRRsetCount(msg, Message::SECTION_AUTHORITY));
+ EXPECT_EQ(5, sectionRRsetCount(msg, Message::SECTION_ADDITIONAL));
// Check the rrset in answer section.
EXPECT_EQ(1, msg.getRRCount(Message::SECTION_ANSWER));
diff --git a/src/lib/cache/tests/rrset_cache_unittest.cc b/src/lib/cache/tests/rrset_cache_unittest.cc
index afb7eaa..b61f5c4 100644
--- a/src/lib/cache/tests/rrset_cache_unittest.cc
+++ b/src/lib/cache/tests/rrset_cache_unittest.cc
@@ -34,50 +34,94 @@ namespace {
class RRsetCacheTest : public testing::Test {
protected:
RRsetCacheTest():
- cache(RRSET_CACHE_DEFAULT_SIZE, RRClass::IN().getCode()),
- name("example.com"),
- rrset1(name, RRClass::IN(), RRType::A(), RRTTL(20)),
- rrset2(name, RRClass::IN(), RRType::A(), RRTTL(10)),
- rrset_entry1(rrset1, RRSET_TRUST_ADDITIONAL_AA),
- rrset_entry2(rrset2, RRSET_TRUST_PRIM_ZONE_NONGLUE)
+ cache_(1, RRClass::IN().getCode()),
+ name_("example.com"),
+ rrset1_(name_, RRClass::IN(), RRType::A(), RRTTL(20)),
+ rrset2_(name_, RRClass::IN(), RRType::A(), RRTTL(10)),
+ rrset_entry1_(rrset1_, RRSET_TRUST_ADDITIONAL_AA),
+ rrset_entry2_(rrset2_, RRSET_TRUST_PRIM_ZONE_NONGLUE)
{
}
- RRsetCache cache;
- Name name;
- RRset rrset1;
- RRset rrset2;
- RRsetEntry rrset_entry1;
- RRsetEntry rrset_entry2;
+ RRsetCache cache_;
+ Name name_;
+ RRset rrset1_;
+ RRset rrset2_;
+ RRsetEntry rrset_entry1_;
+ RRsetEntry rrset_entry2_;
};
+void
+updateRRsetCache(RRsetCache& cache, Name& rrset_name,
+ uint32_t ttl = 20,
+ RRsetTrustLevel level = RRSET_TRUST_ADDITIONAL_AA)
+{
+ RRset rrset(rrset_name, RRClass::IN(), RRType::A(), RRTTL(ttl));
+ cache.update(rrset, level);
+}
+
TEST_F(RRsetCacheTest, lookup) {
const RRType& type = RRType::A();
- EXPECT_TRUE(cache.lookup(name, type) == NULL);
-
- cache.update(rrset1, rrset_entry1.getTrustLevel());
- RRsetEntryPtr rrset_entry_ptr = cache.lookup(name, type);
- EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry1.getTrustLevel());
- EXPECT_EQ(rrset_entry_ptr->getRRset()->getName(), rrset_entry1.getRRset()->getName());
- EXPECT_EQ(rrset_entry_ptr->getRRset()->getType(), rrset_entry1.getRRset()->getType());
- EXPECT_EQ(rrset_entry_ptr->getRRset()->getClass(), rrset_entry1.getRRset()->getClass());
+ EXPECT_TRUE(cache_.lookup(name_, type) == NULL);
+
+ cache_.update(rrset1_, rrset_entry1_.getTrustLevel());
+ RRsetEntryPtr rrset_entry_ptr = cache_.lookup(name_, type);
+ EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry1_.getTrustLevel());
+ EXPECT_EQ(rrset_entry_ptr->getRRset()->getName(), rrset_entry1_.getRRset()->getName());
+ EXPECT_EQ(rrset_entry_ptr->getRRset()->getType(), rrset_entry1_.getRRset()->getType());
+ EXPECT_EQ(rrset_entry_ptr->getRRset()->getClass(), rrset_entry1_.getRRset()->getClass());
+
+ // Check whether an expired rrset entry is removed automatically
+ // when it is looked up.
+ Name name_test("test.example.com.");
+ updateRRsetCache(cache_, name_test, 0); // Add a rrset with TTL 0 to cache.
+ EXPECT_FALSE(cache_.lookup(name_test, RRType::A()));
}
TEST_F(RRsetCacheTest, update) {
const RRType& type = RRType::A();
- cache.update(rrset1, rrset_entry1.getTrustLevel());
- RRsetEntryPtr rrset_entry_ptr = cache.lookup(name, type);
- EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry1.getTrustLevel());
+ cache_.update(rrset1_, rrset_entry1_.getTrustLevel());
+ RRsetEntryPtr rrset_entry_ptr = cache_.lookup(name_, type);
+ EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry1_.getTrustLevel());
- cache.update(rrset2, rrset_entry2.getTrustLevel());
- rrset_entry_ptr = cache.lookup(name, type);
+ cache_.update(rrset2_, rrset_entry2_.getTrustLevel());
+ rrset_entry_ptr = cache_.lookup(name_, type);
// The trust level should be updated
- EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry2.getTrustLevel());
+ EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry2_.getTrustLevel());
- cache.update(rrset1, rrset_entry1.getTrustLevel());
+ cache_.update(rrset1_, rrset_entry1_.getTrustLevel());
// The trust level should not be updated
- EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry2.getTrustLevel());
+ EXPECT_EQ(rrset_entry_ptr->getTrustLevel(), rrset_entry2_.getTrustLevel());
+}
+
+// Test whether the lru list in rrset cache works as expected.
+TEST_F(RRsetCacheTest, cacheLruBehavior) {
+ Name name1("1.example.com.");
+ Name name2("2.example.com.");
+ Name name3("3.example.com.");
+ Name name4("4.example.com.");
+
+ updateRRsetCache(cache_, name1);
+ updateRRsetCache(cache_, name2);
+ updateRRsetCache(cache_, name3);
+
+ EXPECT_TRUE(cache_.lookup(name1, RRType::A()));
+
+ // Now add the fourth rrset; the rrset with name "2.example.com."
+ // should have been removed from the cache.
+ updateRRsetCache(cache_, name4);
+ EXPECT_FALSE(cache_.lookup(name2, RRType::A()));
+
+ // Test updating an rrset with a higher trust level.
+ updateRRsetCache(cache_, name1, 20, RRSET_TRUST_PRIM_GLUE);
+ // Test updating an rrset with a lower trust level.
+ updateRRsetCache(cache_, name3, 20, RRSET_TRUST_ADDITIONAL_NONAA);
+
+ // When the rrset with name2 is added, the rrset with name4
+ // should have been removed from the cache.
+ updateRRsetCache(cache_, name2);
+ EXPECT_FALSE(cache_.lookup(name4, RRType::A()));
}
}
diff --git a/src/lib/cache/tests/testdata/message_fromWire7 b/src/lib/cache/tests/testdata/message_fromWire7
new file mode 100644
index 0000000..7b10b5d
--- /dev/null
+++ b/src/lib/cache/tests/testdata/message_fromWire7
@@ -0,0 +1,27 @@
+#
+# ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 1155
+# ;; flags: qr aa rd; QUERY: 1, ANSWER: 5, AUTHORITY: 0, ADDITIONAL: 0
+# ;; WARNING: recursion requested but not available
+#
+# ;; QUESTION SECTION:
+# ;test.example.com. IN A
+#
+# ;; ANSWER SECTION:
+# test.example.com. 21600 IN CNAME cname.a.dname.example.com.
+# dname.example.com. 21600 IN DNAME dname.example.org.
+# cname.a.dname.example.com. 21600 IN CNAME cname.a.dname.example.org.
+# dname.example.org. 21600 IN DNAME dname.example.org.
+# cname.a.dname.example.org. 21600 IN CNAME cname.a.dname.example.org.
+
+0424 8500
+ 00 01 00 05 00 00 00 00 04 74 65 73
+ 74 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00 00 01
+ 00 01 c0 0c 00 05 00 01 00 00 54 60 00 10 05 63
+ 6e 61 6d 65 01 61 05 64 6e 61 6d 65 c0 11 c0 36
+ 00 27 00 01 00 00 54 60 00 13 05 64 6e 61 6d 65
+ 07 65 78 61 6d 70 6c 65 03 6f 72 67 00 c0 2e 00
+ 05 00 01 00 00 54 60 00 0a 05 63 6e 61 6d 65 01
+ 61 c0 4a c0 4a 00 27 00 01 00 00 54 60 00 13 05
+ 64 6e 61 6d 65 07 65 78 61 6d 70 6c 65 03 6f 72
+ 67 00 c0 69 00 05 00 01 00 00 54 60 00 02 c0 69
+
diff --git a/src/lib/cache/tests/testdata/message_fromWire8 b/src/lib/cache/tests/testdata/message_fromWire8
new file mode 100644
index 0000000..bc9e144
--- /dev/null
+++ b/src/lib/cache/tests/testdata/message_fromWire8
@@ -0,0 +1,23 @@
+# A response including multiple DNAME and synthesized CNAME records
+# ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 900
+# ;; flags: qr aa rd; QUERY: 1, ANSWER: 4, AUTHORITY: 0, ADDITIONAL: 0
+# ;; WARNING: recursion requested but not available
+#
+# ;; QUESTION SECTION:
+# ;a.dname.example.com. IN NS
+#
+# ;; ANSWER SECTION:
+# dname.example.com. 21600 IN DNAME dname.example.org.
+# a.dname.example.com. 21600 IN CNAME a.dname.example.org.
+# dname.example.org. 21600 IN DNAME dname.example.org.
+# a.dname.example.org. 21600 IN CNAME a.dname.example.org.
+0384 8500
+ 00 01 00 04 00 00 00 00 01 61 05 64
+ 6e 61 6d 65 07 65 78 61 6d 70 6c 65 03 63 6f 6d
+ 00 00 02 00 01 c0 0e 00 27 00 01 00 00 54 60 00
+ 13 05 64 6e 61 6d 65 07 65 78 61 6d 70 6c 65 03
+ 6f 72 67 00 c0 0c 00 05 00 01 00 00 54 60 00 04
+ 01 61 c0 31 c0 31 00 27 00 01 00 00 54 60 00 13
+ 05 64 6e 61 6d 65 07 65 78 61 6d 70 6c 65 03 6f
+ 72 67 00 c0 50 00 05 00 01 00 00 54 60 00 02 c0
+ 50
diff --git a/src/lib/cache/tests/testdata/message_fromWire9 b/src/lib/cache/tests/testdata/message_fromWire9
new file mode 100644
index 0000000..e2dbd06
--- /dev/null
+++ b/src/lib/cache/tests/testdata/message_fromWire9
@@ -0,0 +1,25 @@
+#
+# The TTL for a record in answer section is 0, so it
+# will expire immediately after being cached.
+#
+# A simple DNS response message
+# ID = 0x1035
+# QR=1 (response), Opcode=0, AA=1, RD=1 (other fields are 0)
+# QDCOUNT=1, ANCOUNT=2, other COUNTS=0
+# Question: test.example.org. IN A
+# Answer:
+# test.example.org. 0000 IN A 192.0.2.1
+# test.example.org. 7200 IN A 192.0.2.2
+#
+1035 8500
+0001 0002 0000 0000
+#(4) t e s t (7) e x a m p l e (3) o r g .
+ 04 74 65 73 74 07 65 78 61 6d 70 6c 65 03 6f 72 67 00
+0001 0001
+# same name, fully compressed
+c0 0c
+# TTL=0, A, IN, RDLENGTH=4, RDATA
+0001 0001 00000000 0004 c0 00 02 01
+# mostly same, with the slight difference in RDATA and TTL
+c0 0c
+0001 0001 00001c20 0004 c0 00 02 02
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index bd04066..03a6967 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -533,12 +533,9 @@ private:
private:
// The max label count for one domain name is Name::MAX_LABELS (128).
- // Since each node in rbtree stores at least one label, and the root
- // name always shares the same level with some others (which means
- // all top level nodes except the one for the root name contain at least
- // two labels), the possible maximum level is MAX_LABELS - 1.
- // It's also the possible maximum nodes stored in a chain.
- const static int RBT_MAX_LEVEL = isc::dns::Name::MAX_LABELS - 1;
+ // Since each node in the rbtree stores at least one label, this is also
+ // the maximum possible number of levels.
+ const static int RBT_MAX_LEVEL = isc::dns::Name::MAX_LABELS;
int node_count_;
const RBNode<T>* nodes_[RBT_MAX_LEVEL];
@@ -999,8 +996,15 @@ RBTree<T>::find(const isc::dns::Name& target_name,
const int common_label_count =
node_path.last_comparison_.getCommonLabels();
// If the common label count is 1, there is no common label between
- // the two names, except the trailing "dot".
- if (common_label_count == 1) {
+ // the two names, except the trailing "dot". In this case the two
+ // sequences of labels have essentially no hierarchical
+ // relationship in terms of matching, so we should continue the
+ // binary search. One important exception is when the node
+ // represents the root name ("."), in which case the comparison
+ // result must indeed be considered subdomain matching. (We use
+ // getLength() to check if the name is root, which is an equivalent
+ // but cheaper way).
+ if (common_label_count == 1 && node->name_.getLength() != 1) {
node = (node_path.last_comparison_.getOrder() < 0) ?
node->left_ : node->right_;
} else if (relation == isc::dns::NameComparisonResult::SUBDOMAIN) {
@@ -1093,7 +1097,8 @@ RBTree<T>::insert(const isc::dns::Name& target_name, RBNode<T>** new_node) {
return (ALREADYEXISTS);
} else {
const int common_label_count = compare_result.getCommonLabels();
- if (common_label_count == 1) {
+ // Note: see find() for the check of getLength().
+ if (common_label_count == 1 && current->name_.getLength() != 1) {
parent = current;
order = compare_result.getOrder();
current = order < 0 ? current->left_ : current->right_;
diff --git a/src/lib/datasrc/tests/rbtree_unittest.cc b/src/lib/datasrc/tests/rbtree_unittest.cc
index 23b25f4..dd1b7fe 100644
--- a/src/lib/datasrc/tests/rbtree_unittest.cc
+++ b/src/lib/datasrc/tests/rbtree_unittest.cc
@@ -284,21 +284,21 @@ TEST_F(RBTreeTest, chainLevel) {
EXPECT_EQ(1, chain.getLevelCount());
/*
- * Now creating a possibly deepest tree with MAX_LABELS - 1 levels.
+ * Now creating a possibly deepest tree with MAX_LABELS levels.
* it should look like:
+ * (.)
+ * |
* a
- * /|
- * (.)a
* |
* a
* : (MAX_LABELS - 1) "a"'s
*
* then confirm that find() for the deepest name succeeds without any
* disruption, and the resulting chain has the expected level.
- * Note that "a." and the root name (".") belong to the same level.
- * So the possible maximum level is MAX_LABELS - 1, not MAX_LABELS.
+ * Note that the root name (".") occupies a level of its own,
+ * so the level count starts at 2.
*/
- for (unsigned int i = 1; i < Name::MAX_LABELS; ++i) {
+ for (unsigned int i = 2; i <= Name::MAX_LABELS; ++i) {
node_name = Name("a.").concatenate(node_name);
EXPECT_EQ(RBTree<int>::SUCCESS, tree.insert(node_name, &rbtnode));
RBTreeNodeChain<int> found_chain;
@@ -523,4 +523,44 @@ TEST_F(RBTreeTest, swap) {
tree2.dumpTree(out);
ASSERT_EQ(str1.str(), out.str());
}
+
+// Matching in the "root zone" may be special (e.g. there's no parent,
+// any domain name should be considered a subdomain of it), so it makes
+// sense to test cases with the root zone explicitly.
+TEST_F(RBTreeTest, root) {
+ RBTree<int> root;
+ root.insert(Name::ROOT_NAME(), &rbtnode);
+ rbtnode->setData(RBNode<int>::NodeDataPtr(new int(1)));
+
+ EXPECT_EQ(RBTree<int>::EXACTMATCH,
+ root.find(Name::ROOT_NAME(), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+ EXPECT_EQ(RBTree<int>::PARTIALMATCH,
+ root.find(Name("example.com"), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+
+ // Insert a new name that better matches the query name. find() should
+ // find the better one.
+ root.insert(Name("com"), &rbtnode);
+ rbtnode->setData(RBNode<int>::NodeDataPtr(new int(2)));
+ EXPECT_EQ(RBTree<int>::PARTIALMATCH,
+ root.find(Name("example.com"), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+
+ // Perform the same tests for the tree that allows matching against empty
+ // nodes.
+ RBTree<int> root_emptyok(true);
+ root_emptyok.insert(Name::ROOT_NAME(), &rbtnode);
+ EXPECT_EQ(RBTree<int>::EXACTMATCH,
+ root_emptyok.find(Name::ROOT_NAME(), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+ EXPECT_EQ(RBTree<int>::PARTIALMATCH,
+ root_emptyok.find(Name("example.com"), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+
+ root.insert(Name("com"), &rbtnode);
+ EXPECT_EQ(RBTree<int>::PARTIALMATCH,
+ root.find(Name("example.com"), &crbtnode));
+ EXPECT_EQ(rbtnode, crbtnode);
+}
}
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 5bcafa8..d941b01 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -23,6 +23,11 @@ liblog_la_SOURCES += message_types.h
liblog_la_SOURCES += root_logger_name.cc root_logger_name.h
liblog_la_SOURCES += strutil.h strutil.cc
+EXTRA_DIST = README
+EXTRA_DIST += messagedef.mes
+EXTRA_DIST += logger_impl_log4cxx.cc logger_impl_log4cxx.h
+EXTRA_DIST += xdebuglevel.cc xdebuglevel.h
+
# Note: the ordering matters: -Wno-... must follow -Wextra (defined in
# B10_CXXFLAGS)
liblog_la_CXXFLAGS = $(AM_CXXFLAGS)
diff --git a/src/lib/log/README b/src/lib/log/README
new file mode 100644
index 0000000..072649e
--- /dev/null
+++ b/src/lib/log/README
@@ -0,0 +1,376 @@
+This directory holds the first release of the logging system.
+
+Basic Ideas
+===========
+The BIND-10 logging system merges two ideas:
+
+* A hierarchical logging system similar to that used in Java (i.e. log4j)
+* Separation of message definitions and text
+
+
+Hierarchical Logging System
+===========================
+When a program writes a message to the logging system, it does so using an
+instance of the Logger class. As well as performing the write of the message,
+the logger identifies the source of the message: different sources can write
+to different destinations and can log different severities of messages.
+For example, the "cache" logger could write messages of DEBUG severity or
+above to a file while all other components write messages of "INFO" severity
+or above to the Syslog file.
+
+The loggers are hierarchical in that each logger is the child of another
+logger. The top of the hierarchy is the root logger, which does not have
+a parent. The point of the hierarchy is that unless a logger is explicitly
+assigned an attribute (such as the severity of message being logged), it picks
+it up from the parent. (In BIND-10, there is the root logger (named after
+the program) and every other logger is a child of that.) So in the example
+above, the INFO/Syslog attributes could be associated with the root logger
+while the DEBUG/file attributes are associated with the "cache" logger.
+
+
+Separation of Messages Definitions And Text
+===========================================
+The reason for this is to allow the message text to be overridden by versions
+in a local language. To do this, each message is identified by an identifier
+e.g. "OPENIN". Within the program, this is the symbol passed to the logging
+system. The logger system uses the symbol as an index into a dictionary to
+retrieve the message associated with it (e.g. "unable to open %s for input"),
+substitutes any message parameters (in this example, the string that is an
+invalid filename) and logs the result to the destination.
+
+In the BIND-10 system, a set of default messages are linked into the
+program. At run-time, each program reads a message file, updating the
+stored definitions; this updated text is logged. However, to aid support,
+the message identifier is retained in the logged output, so in the example
+above the message finally logged would be something like:
+
+ OPENIN, unable to open a.txt for input
+
+
+Using The System
+================
+The steps in using the system are:
+
+1. Create a message file. This defines messages by an identification - a
+ mnemonic for the message, typically 6-12 characters long - and a message.
+ The file is described in more detail below.
+
+ Ideally the file should have a file type of ".msg".
+
+2. Run it through the message compiler to produce the .h and .cc files. It
+ is intended that this step be included in the build process. However,
+ for now run the compiler (found in the "compiler" subdirectory) manually.
+ The only argument is the name of the message file: it will produce as
+ output two files, having the same name as the input file but with file
+ types of ".h" and ".cc".
+
+ The compiler is built in the "compiler" subdirectory of the "src/lib/log"
+ directory.
+
+3. Include the .h file in your source code to define message symbols, and
+ make sure that the .cc file is compiled and linked into your program -
+ static initialization will add the symbols to the global dictionary.
+
+4. Declare loggers in your code and use them to log messages. This is
+ described in more detail below.
+
+5. To set the debug level and run-time message file, call initLogger (declared
+ in logger_support.h) in the main program unit. This is a temporary solution
+ for Year 2, and will be replaced at a later date, the information coming
+ from the configuration database.
+
+
+Message Files
+=============
+
+File Contents and Format
+------------------------
+A message file is a file containing message definitions. Typically there
+will be one message file for each component that declares message symbols.
+An example file could be:
+
+-- BEGIN --
+
+# Example message file
+# $ID:$
+
+$PREFIX TEST_
+$NAMESPACE isc::log
+TEST1 message %s is much too large
++ This message is a test for the general message code
+
+UNKNOWN unknown message
++ Issued when the message is unknown.
+
+-- END --
+
+Points to note:
+* Leading and trailing space are trimmed from the line. Although the above
+ example has every line starting at column 1, the lines could be indented
+ if desired.
+
+* Blank lines are ignored.
+
+* Lines starting with "#" are comments are are ignored. Comments must be on
+ a line by themselves - inline comments will be interpreted as part of the
+ text of the line.
+
+* Lines starting $ are directives. At present, two directives are recognised:
+
+ * $PREFIX, which has one argument: the string used to prefix symbols. If
+ absent, there is no prefix to the symbols. (Prefixes are explained below.)
+ * $NAMESPACE, which has one argument: the namespace in which the symbols are
+ created. In the absence of a $NAMESPACE directive, symbols will be put
+ in the global namespace.
+
+* Lines starting + indicate an explanation for the preceding message. These
+ are intended to be processed by a separate program and used to generate
+ an error messages manual. However they are treated like comments by the
+ message compiler. As with comments, these must be on a line by themselves;
+ if inline, the text (including the leading "+") will be interpreted as
+ part of the line.
+
+* Message lines. These comprise a symbol name and a message, which may
+ include zero or more printf-style tokens. Symbol names will be upper-cased
+ by the compiler.
+
+
+Message Compiler
+----------------
+The message compiler is a program built in the src/log/compiler directory.
+It is invoked by the command:
+
+ message [-h] [-v] <message-file>
+
+("-v" prints the version number and exits; "-h" prints brief help text.)
+The message compiler processes the message file to produce two files:
+
+1) A C++ header file (called <message-file-name>.h) that holds lines of
+the form:
+
+ namespace <namespace> {
+ extern const isc::log::MessageID PREFIX_IDENTIFIER;
+ :
+ }
+
+The symbols define the keys in the global message dictionary.
+
+The namespace enclosing the symbols is set by the $NAMESPACE directive.
+
+The "PREFIX_" part of the symbol name is the string defined in the $PREFIX
+the argument to the directive. So "$PREFIX MSG_" would prefix the identifer
+ABC with "MSG_" to give the symbol MSG_ABC. Similarly "$PREFIX E" would
+prefix it with "E" to give the symbol EABC. If no $PREFIX is given, no
+prefix appears (so the symbol in this example would be ABC).
+
+2) A C++ source file (called <message-file-name>.cc) that holds the definitions
+of the global symbols and code to insert the symbols and messages into the map.
+
+Symbols are defined to be equal to strings holding the identifier, e.g.
+
+ extern const isc::log::MessageID MSG_DUPLNS = "DUPLNS";
+
+(The implementation allows symbols to be compared. However, use of strings
+should not be assumed - a future implementation may change this.)
+
+In addition, the file declares an array of identifiers/messages and an object
+to add them to the global dictionary:
+
+ namespace {
+ const char* values[] = {
+ identifier1, text1,
+ identifier2, text2,
+ :
+ NULL
+ };
+
+ const isc::log::MessageInitializer initializer(values);
+ }
+
+The constructor of the MessageInitializer object retrieves the singleton
+global Dictionary object (created using standard methods to avoid the
+"static initialization fiasco") and adds each identifier and text to it.
+A check is made as each is added; if the identifier already exists, it is
+added to "overflow" vector; the vector is printed to the main logging output
+when logging is finally enabled (to indicate a programming error).
+
+
+Using the Logging
+=================
+To use the current version of the logging:
+
+1. Build the message header file and source file as described above.
+
+2. In the main module of the program, declare an instance of the
+ RootLoggerName class to define the name of the program's root logger, e.g.
+
+ #include <log/root_logger_name.h>
+
+ isc::log::RootLoggerName("b10-auth");
+
+ This can be declared inside or outside an execution unit.
+
+3. In the code that needs to do logging, declare a logger with a given name,
+ e.g.
+
+ #include <log/logger.h>
+ :
+ isc::log::Logger logger("myname"); // "myname" can be anything
+
+ The above example assumes declaration outside a function. If declaring
+ non-statically within a function, declare it as:
+
+ isc::log::Logger logger("myname", true);
+
+ (The argument is required to support a possible future implementation of
+ logging. Currently it has no effect.)
+
+4. The main program unit should include a call to isc::log::initLogger()
+ (defined in logger_support.h) to set the logging severity, debug log level,
+ and external message file.
+
+ a) The logging severity is one of the values of the enum defined in logger.h, i.e.
+
+ isc::log::DEBUG
+ isc::log::INFO
+ isc::log::WARN
+ isc::log::ERROR
+ isc::log::FATAL
+ isc::log::NONE
+
+ b) The debug log level is only interpreted when the severity is DEBUG and
+ is an integer ranging from 0 to 99. 0 should be used for the
+ highest-level debug messages and 99 for the lowest-level (and typically
+ more verbose) messages.
+
+ c) Name of an external message file. This is the same as a standard message
+ file, although it should not include any directives. (A single directive
+ of a particular type will be ignored; multiple directives will cause the
+ read of the file to fail with an error.) If a message is replaced, the
+ message should include the same printf-format directives in the same order
+ as the original message.
+
+5. Issue logging calls using methods on the logger, e.g.
+
+ logger.error(DPS_NSTIMEOUT, "isc.org");
+
+ where, in the example above, we might have defined the symbol in the
+ message file with something along the lines of:
+
+ $PREFIX DPS_
+ :
+ NSTIMEOUT queries to all nameservers for %s have timed out
+
+ At present, the only logging is to the console.
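+
+ To illustrate, here is a minimal sketch pulling the above steps together.
+ The generated header name (example_messages.h), the initLogger() argument
+ list and the use of the TEST_TEST1 symbol from the example message file
+ earlier in this README are assumptions for illustration only - check
+ logger_support.h and the generated header for the real declarations.
+
+     // main.cc - illustrative sketch only, not part of the build
+     #include <log/root_logger_name.h>
+     #include <log/logger.h>
+     #include <log/logger_support.h>
+     #include "example_messages.h"   // hypothetical compiler-generated header
+
+     // Step 2: define the name of the program's root logger.
+     isc::log::RootLoggerName root_name("b10-example");
+
+     // Step 3: a logger for this component.
+     isc::log::Logger logger("example");
+
+     int main() {
+         // Step 4: set severity, debug level and external message file.
+         // (Argument list assumed - see logger_support.h.)
+         isc::log::initLogger(isc::log::INFO, 0, NULL);
+
+         // Step 5: log a message defined in the message file.
+         logger.error(isc::log::TEST_TEST1, "some value");
+         return (0);
+     }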
+
+
+Severity Guidelines
+===================
+When using logging, the question arises, what severity should a message be
+logged at? The following is a suggestion - as always, the decision must be
+made in the context in which the message is logged.
+
+FATAL
+-----
+The program has encountered an error that is so severe that it cannot
+continue (or there is no point in continuing). When a fatal error has been
+logged, the program will usually exit immediately (via a call to abort()) or
+shortly afterwards, after dumping some diagnostic information.
+
+ERROR
+-----
+Something has happened such that the program can continue but the results
+for the current (or future) operations cannot be guaranteed to be correct,
+or the results will be correct but the service is impaired. For example,
+the program started but attempts to open one or more network interfaces failed.
+
+WARN
+----
+An unusual event happened. Although the program will continue working
+normally, the event was sufficiently out of the ordinary to warrant drawing
+attention to it. For example, at program start-up a zone was loaded that
+contained no resource records.
+
+INFO
+----
+A normal but significant event has occurred that should be recorded,
+e.g. the program has started or is just about to terminate, a new zone has
+been created, etc.
+
+DEBUG
+-----
+This severity is only enabled for debugging purposes. A debug level is
+associated with debug messages, level 0 (the default) being for high-level
+messages and level 99 (the maximum) for the lowest level. How the messages
+are distributed between the levels is up to the developer. So if debugging
+the NSAS (for example), a level 0 message might record the creation of a new
+zone, a level 10 recording a timeout when trying to get a nameserver address,
+but a level 50 would record every query for an address. (And we might add
+level 51 to record every update of the RTT.)
+
+Note that like severities, levels are cumulative; so if level 25 is set as the
+debug level, all debug levels from 0 to 25 will be output. In fact, it is
+probably easier to visualise the debug levels as part of the severity system:
+
+ FATAL High
+ ERROR
+ WARN
+ INFO
+ DEBUG level 0
+ DEBUG level 1
+ :
+ DEBUG level 99 Low
+
+When a particular severity is set, it - and all severities and/or debug
+levels above it - will be logged.
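+
+As a hedged illustration of this cumulative behaviour (the debug() call used
+here, taking the debug level as its first argument, and the NSAS_* message
+symbols are assumptions - check logger.h and the relevant message file for
+the exact interface):
+
+    // Suppose the severity has been set to DEBUG with a debug level of 25.
+    isc::log::Logger logger("nsas");
+
+    logger.debug(0, NSAS_NEWZONE, "example.com");      // logged: 0 <= 25
+    logger.debug(10, NSAS_TIMEOUT, "ns.example.com");  // logged: 10 <= 25
+    logger.debug(50, NSAS_QUERY, "ns.example.com");    // suppressed: 50 > 25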
+
+Logging Sources v Logging Severities
+------------------------------------
+When logging events, make a distinction between events related to the server
+and events related to DNS messages received. Caution needs to be exercised
+with the latter as, if the logging is enabled in the normal course of events,
+such logging could be a denial of service vector. For example, suppose that
+the main authoritative service logger were to log both zone loading and
+unloading as INFO and a warning message if it received an invalid packet. An
+attacker could make the INFO messages unusable by flooding the server with
+malformed packets.
+
+There are two approaches to get round this:
+
+a) Make the logging of packet-dependent events a DEBUG-severity message.
+DEBUG is not enabled by default, so these events will not be recorded unless
+DEBUG is specifically chosen.
+
+b) Record system-related and packet-related messages via different loggers
+(e.g. in the example given, server events could be logged using the logger
+"auth" and packet-related events at that level logged using the logger
+"pkt-auth".) As the loggers are independent and the severity levels
+independent, fine-tuning of what is and what is not recorded can be
+achieved, as sketched below.
+
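+A hedged sketch of approach (b) - the logger names follow the example above,
+while the message symbols and the info()/debug() calls (assumed to exist by
+analogy with the error() call shown earlier) are illustrative only:
+
+    // Server-related events: normal severities on the "auth" logger.
+    isc::log::Logger auth_logger("auth");
+    auth_logger.info(AUTH_ZONE_LOADED, "example.com");
+
+    // Packet-related events: DEBUG only, on a separate "pkt-auth" logger,
+    // so a flood of malformed packets cannot drown the INFO stream.
+    isc::log::Logger pkt_logger("pkt-auth");
+    pkt_logger.debug(10, AUTH_BAD_PACKET, "192.0.2.1");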
+
+Notes
+=====
+The message compiler is written in C++ (instead of Python) because it
+contains a component that reads the message file. This component is used
+in both the message compiler and the server; in the server it is used when
+the server starts up (or when triggered by a command) to read in a message
+file to overwrite the internal dictionary. Writing it in C++ means there
+is only one piece of code that implements this functionality.
+
+
+Outstanding Issues
+==================
+* Ability to configure system according to configuration database.
+* Update the build procedure to create .cc and .h files from the .msg file
+ during the build process. (Requires that the message compiler is built
+ first.)
+
+
+log4cxx Issues
+==============
+Some experimental code to utilise log4cxx as an underlying implementation
+is present in the source code directory although it is not currently used.
+The files are:
+
+ logger_impl_log4cxx.{cc,h}
+ xdebuglevel.{cc,h}
diff --git a/src/lib/log/compiler/Makefile.am b/src/lib/log/compiler/Makefile.am
index 2475036..9343793 100644
--- a/src/lib/log/compiler/Makefile.am
+++ b/src/lib/log/compiler/Makefile.am
@@ -10,11 +10,9 @@ if USE_STATIC_LINK
AM_LDFLAGS = -static
endif
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-
CLEANFILES = *.gcno *.gcda
-pkglibexec_PROGRAMS = message
+noinst_PROGRAMS = message
message_SOURCES = message.cc
message_LDADD = $(top_builddir)/src/lib/log/liblog.la
diff --git a/src/lib/log/documentation.txt b/src/lib/log/documentation.txt
deleted file mode 100644
index 0501587..0000000
--- a/src/lib/log/documentation.txt
+++ /dev/null
@@ -1,434 +0,0 @@
-This directory holds the first release of the logging system.
-
-Basic Ideas
-===========
-The BIND-10 logging system merges two ideas:
-
-* A hierarchical logging system similar to that used in Java (i.e. log4j)
-* Separation of message definitions and text
-
-
-Hierarchical Logging System
-===========================
-When a program writes a message to the logging system, it does so using an
-instance of the Logger class. As well as performing the write of the message,
-the logger identifies the source of the message: different sources can write
-to different destinations and can log different severities of messages.
-For example, the "cache" logger could write messages of DEBUG severity or
-above to a file while all other components write messages of "INFO" severity
-or above to the Syslog file.
-
-The loggers are hierarchical in that each logger is the child of another
-logger. The top of the hierarchy is the root logger, which does not have
-a parent. The point of the hierarchy is that unless a logger is explicitly
-assigned an attribute (such as severity of message being logger), it picks
-it up from the parent. (In BIND-10, there is the root logger (named after
-the program) and every other logger is a child of that.) So in the example
-above, the INFO/Syslog attributes could be associated with the root logger
-while the DEBUG/file attributes are associated with the "cache" logger.
-
-
-Separation of Messages Definitions And Text
-===========================================
-The reason for this is to allow the message text to be overridden by versions
-in a local language. To do this, each message is identified by an identifier
-e.g. "OPENIN". Within the program, this is the symbol passed to the logging
-system. The logger system uses the symbol as an index into a dictionary to
-retrieve the message associated with it (e.g. "unable to open %s for input").
-substitutes any message parameters (in this example, the string that is an
-invalid filename) and logs it to the destination.
-
-In the BIND-10 system, a set of default messages are linked into the
-program. At run-time. each program reads a message file, updating the
-stored definitions; this updated text is logged. However, to aid support,
-the message identifier so in the example above, the message finally logged
-would be something like:
-
- OPENIN, unable to open a.txt for input
-
-
-Using The System
-================
-The steps in using the system are:
-
-1. Create a message file. This defines messages by an identification - a
- mnemonic for the message, typically 6-12 characters long - and a message.
- The file is described in more detail below.
-
- Ideally the file should have a file type of ".msg".
-
-2. Run it through the message compiler to produce the .h and .cc files. It
- is intended that this step be included in the build process. However,
- for not run the compiler (found in the "compiler" subdirectory) manually.
- The only argument is the name of the message file: it will produce as
- output two files, having the same name as the input file but with file
- types of ".h" and ".cc".
-
- The compiler is built in the "compiler" subdirectory of the "src/lib/log"
- directory.
-
-3. Include the .h file in your source code to define message symbols, and
- make sure that the .cc file is compiled and linked into your program -
- static initialization will add the symbols to the global dictionary.
-
-4. Declare loggers in your code and use them to log messages. This is
- described in more detail below.
-
-5. To set the debug level and run-time message file, call runTimeInit (declared
- in logger_support.h) in the main program unit. This is a temporary solution
- for Year 2, and will be replaced at a later date, the information coming
- from the configuration database.
-
-
-Message Files
-=============
-
-File Contents and Format
-------------------------
-A message file is a file containing message definitions. Typically there
-will be one message file for each component that declares message symbols.
-An example file could be:
-
--- BEGIN --
-
-# Example message file
-# $ID:$
-
-$PREFIX TEST_
-$NAMESPACE isc::log
-TEST1 message %s is much too large
-+ This message is a test for the general message code
-
-UNKNOWN unknown message
-+ Issued when the message is unknown.
-
--- END --
-
-Points to note:
-* Leading and trailing space is trimmed from each line. Although the above
- example has every line starting at column 1, the lines could be indented
- if desired.
-
-* Blank lines are ignored.
-
-* Lines starting with "#" are comments and are ignored. Comments must be on
- a line by themselves - inline comments will be interpreted as part of the
- text of the line.
-
-* Lines starting with "$" are directives. At present, two directives are recognised:
-
- * $PREFIX, which has one argument: the string used to prefix symbols. If
- absent, there is no prefix to the symbols. (Prefixes are explained below.)
- * $NAMESPACE, which has one argument: the namespace in which the symbols are
- created. (Specifying the argument as a double colon - i.e. "$NAMESPACE
- ::" - puts the symbol definitions in the unnamed namespace, and not
- including a $NAMESPACE directive at all will result in the symbols not
- being put in any namespace.)
-
-* Lines starting with "+" indicate an explanation for the preceding message.
- These are intended to be processed by a separate program and used to
- generate an error-message manual. However, they are treated as comments
- by the message compiler. As with comments, these must be on a line by
- themselves; if inline, the text (including the leading "+") will be
- interpreted as part of the line.
-
-* Message lines. These comprise a symbol name and a message, which may
- include zero or more printf-style tokens. Symbol names will be upper-cased
- by the compiler.
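-
- For example, the message line
-
-   dupRR duplicate resource record %s found in zone %s
-
- defines the symbol DUPRR (the compiler upper-cases the name) with a message
- containing two printf-style tokens. (This line is illustrative only; it is
- not part of the example file above.)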
-
-
-Message Compiler
-----------------
-The message compiler is a program built in the src/lib/log/compiler directory.
-It is invoked by the command:
-
- message [-h] [-v] <message-file>
-
-("-v" prints the version number and exits; "-h" prints brief help text.)
-The message compiler processes the message file to produce two files:
-
-1) A C++ header file (called <message-file-name>.h) that holds lines of
-the form:
-
- namespace <namespace> {
- isc::log::MessageID PREFIX_IDENTIFIER = "IDENTIFIER";
- :
- }
-
-The symbols define the keys in the global message dictionary. At present
-they are defined as std::strings, but a future implementation could redefine
-them as numeric values.
-
-The namespace enclosing the symbols is set by the $NAMESPACE directive.
-
-The "PREFIX_" part of the symbol name is the string given as the argument
-to the $PREFIX directive. So "$PREFIX MSG_" would prefix the identifier
-ABC with "MSG_" to give the symbol MSG_ABC. Similarly "$PREFIX E" would
-prefix it with "E" to give the symbol EABC. If no $PREFIX is given, no
-prefix appears (so the symbol in this example would be ABC).
-
-The header file also includes a couple of lines to ensure that the message
-text is included in the final program image.
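-
-As an illustration (a sketch of the expected output, not a verbatim copy of
-what the compiler produces), the example message file earlier - which uses
-"$PREFIX TEST_" and "$NAMESPACE isc::log" - would give a header containing
-something like:
-
-  namespace isc {
-  namespace log {
-
-  isc::log::MessageID TEST_TEST1 = "TEST1";
-  isc::log::MessageID TEST_UNKNOWN = "UNKNOWN";
-
-  }   // namespace log
-  }   // namespace isc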
-
-
-2) A C++ source file (called <message-file-name>.cc) that holds the code to
-insert the symbols and messages into the map.
-
-This file declares an array of identifiers/messages in the form:
-
- namespace {
- const char* values[] = {
- identifier1, text1,
- identifier2, text2,
- :
- NULL
- };
- }
-
-(A more complex structure to group identifiers and their messages could be
-imposed, but as the array is generated by code and will be read by code,
-it is not needed.)
-
-It then declares an object that will add information to the global dictionary:
-
- MessageInitializer <message-file-name>_<time>(values);
-
-(Declaring the object as "static" or in the anonymous namespace runs the risk
-of it being optimised away when the module is compiled with optimisation.
-But giving it a standard name would cause a clash when multiple files are
-used, hence an attempt at disambiguation.)
-
-The constructor of the MessageInitializer object retrieves the singleton
-global Dictionary object (created using standard methods to avoid the
-"static initialization fiasco") and adds each identifier and text to it.
-A check is made as each is added; if the identifier already exists, it is
-added to an "overflow" vector; the vector is printed to the main logging output
-when logging is finally enabled (to indicate a programming error).
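-
-Again as a sketch only (the exact object name is generated from the file name
-and a timestamp, as described above), the .cc file for the earlier example
-message file would contain something along the lines of:
-
-  namespace {
-  const char* values[] = {
-      "TEST1", "message %s is much too large",
-      "UNKNOWN", "unknown message",
-      NULL
-  };
-  }
-
-  // Adds the identifier/text pairs to the global dictionary at start-up;
-  // "example_msg_1234" stands in for the generated <file>_<time> name.
-  MessageInitializer example_msg_1234(values);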
-
-
-Using the Logging
-=================
-To use the current version of the logging:
-
-1. Build the message header file and source file as described above.
-
-2. In the main module of the program, declare an instance of the
- RootLoggerName class to define the name of the program's root logger, e.g.
-
- #include <log/root_logger_name.h>
-
- isc::log::RootLoggerName("b10-auth");
-
- This can be declared inside or outside an execution unit.
-
-3. In the code that needs to do logging, declare a logger with a given name,
- e.g.
-
- #include <log/logger.h>
- :
- isc::log::Logger logger("myname"); // "myname" can be anything
-
- The above example assumes declaration outside a function. If declaring
- non-statically within a function, declare it as:
-
- isc::log::Logger logger("myname", true);
-
- The argument is ignored for underlying implementations other than log4cxx.
- See below for the use of this argument.
-
-4. The main program unit should include a call to isc::log::runTimeInit()
- (defined in logger_support.h) to set the logging severity, debug log level,
- and external message file.
-
- a) The logging severity is one of the enum values defined in logger.h, i.e.
-
- isc::log::DEBUG
- isc::log::INFO
- isc::log::WARN
- isc::log::ERROR
- isc::log::FATAL
- isc::log::NONE
-
- b) The debug log level is only interpreted when the severity is DEBUG and
- is an integer ranging from 0 to 99. 0 should be used for the highest-level
- debug messages and 99 for the lowest-level (and typically more verbose)
- messages.
-
- c) Name of an external message file. This is the same as a standard message
- file, although it should not include the $PREFIX directive. (A single
- $PREFIX directive will be ignored; multiple directives will cause the
- read of the file to fail with an error.) If a message is replaced, the
- message should include the same printf-format directives in the same order
- as the original message.
-
-5. Issue logging calls using methods on the logger, e.g.
-
- logger.error(DPS_NSTIMEOUT, "isc.org");
-
- where, in the example above, we might have defined the symbol in the message
- file with something along the lines of:
-
- $PREFIX DPS_
- :
- NSTIMEOUT queries to all nameservers for %s have timed out
-
- At present, the only logging is to the console.
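-
- Putting steps 1 to 5 together, a minimal program might look like the sketch
- below. (The argument list of runTimeInit() is an assumption based on the
- description in step 4 - check logger_support.h for the real signature - and
- dps_messages.h and "dps_local.msg" are hypothetical names used only for
- illustration.)
-
-  #include <log/logger.h>
-  #include <log/logger_support.h>
-  #include <log/root_logger_name.h>
-  #include "dps_messages.h"     // hypothetical header generated in step 1
-
-  isc::log::RootLoggerName root_name("b10-auth");  // step 2
-  isc::log::Logger logger("dps");                  // step 3
-
-  int main() {
-      // Step 4: severity INFO, debug level 0, optional local message file
-      // (assumed argument order).
-      isc::log::runTimeInit(isc::log::INFO, 0, "dps_local.msg");
-
-      // Step 5: log a message defined as DPS_NSTIMEOUT in the message file.
-      logger.error(DPS_NSTIMEOUT, "isc.org");
-      return (0);
-  }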
-
-
-Severity Guidelines
-===================
-When using logging, the question arises: what severity should a message be
-logged at? The following is a suggestion - as always, the decision must be
-made in the context in which the message is logged.
-
-FATAL
------
-The program has encountered an error that is so severe that it cannot
-continue (or there is no point in continuing). When a fatal error has been
-logged, the program will usually exit immediately (via a call to abort()) or
-shortly afterwards, after dumping some diagnostic information.
-
-ERROR
------
-Something has happened such that the program can continue but the results
-for the current (or future) operations cannot be guaranteed to be correct,
-or the results will be correct but the service is impaired. For example,
-the program started but attempts to open one or more network interfaces failed.
-
-WARN
-----
-An unusual event happened. Although the program will continue working
-normally, the event was sufficiently out of the ordinary to warrant drawing
-attention to it. For example, at program start-up a zone was loaded that
-contained no resource records.
-
-INFO
-----
-A normal but significant event has occurred that should be recorded,
-e.g. the program has started or is just about to terminate, a new zone has
-been created, etc.
-
-DEBUG
------
-This severity is only enabled for debugging purposes. A debug level is
-associated with debug messages, level 0 (the default) being for high-level
-messages and level 99 (the maximum) for the lowest level. How the messages
-are distributed between the levels is up to the developer. So if debugging
-the NSAS (for example), a level 0 message might record the creation of a new
-zone, a level 10 message a timeout when trying to get a nameserver address,
-and a level 50 message every query for an address. (And we might add
-level 51 to record every update of the RTT.)
-
-Note that like severities, levels are cumulative; so if level 25 is set as the
-debug level, all debug levels from 0 to 25 will be output. In fact, it is
-probably easier to visualise the debug levels as part of the severity system:
-
- FATAL High
- ERROR
- WARN
- INFO
- DEBUG level 0
- DEBUG level 1
- :
- DEBUG level 99 Low
-
-When a particular severity is set, it - and all severities and/or debug
-levels above it - will be logged.
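-
-For example (a sketch, with the runTimeInit() argument order assumed as in
-the earlier example and "local.msg" a hypothetical message file):
-
-  // Severity DEBUG with debug level 25: FATAL, ERROR, WARN, INFO and
-  // DEBUG levels 0 to 25 are logged; DEBUG levels 26 to 99 are not.
-  isc::log::runTimeInit(isc::log::DEBUG, 25, "local.msg");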
-
-Logging Sources v Logging Severities
-------------------------------------
-When logging events, make a distinction between events related to the server
-and events related to DNS messages received. Caution needs to be exercised
-with the latter as, if the logging is enabled in the normal course of events,
-such logging could be a denial of service vector. For example, suppose that
-the main authoritative service logger were to log both zone loading and
-unloading as INFO and a warning message if it received an invalid packet. An
-attacker could make the INFO messages unusable by flooding the server with
-malformed packets.
-
-There are two approaches to get round this:
-
-a) Make the logging of packet-dependent events a DEBUG-severity message.
-DEBUG is not enabled by default, so these events will not be recorded unless
-DEBUG is specifically chosen.
-
-b) Record system-related and packet-related messages via different loggers
-(e.g. in the example given, server events could be logged using the logger
-"auth" and packet-related events logged using the logger
-"pkt-auth".) As the loggers and their severity levels are independent,
-fine-tuning of what is and what is not recorded can be achieved.
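-
-A sketch of approach (b), assuming logging methods named after the severities
-(only error() is shown verbatim elsewhere in this document) and using
-hypothetical message identifiers:
-
-  isc::log::Logger server_logger("auth");      // server-related events
-  isc::log::Logger packet_logger("pkt-auth");  // per-packet events
-
-  // Normal operational event: logged via the server logger at INFO.
-  server_logger.info(AUTH_ZONE_LOADED, "example.com");
-
-  // Triggered by client traffic: logged via the packet logger, whose
-  // severity can be tuned independently of the "auth" logger.
-  packet_logger.warn(AUTH_PACKET_MALFORMED, "192.0.2.1");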
-
-
-Notes
-=====
-The message compiler is written in C++ (instead of Python) because it
-contains a component that reads the message file. This component is used
-in both the message compiler and the server; in the server it is used when
-the server starts up (or when triggered by a command) to read in a message
-file to overwrite the internal dictionary. Writing it in C++ means there
-is only one piece of code that implements this functionality.
-
-
-Outstanding Issues
-==================
-* Ability to configure system according to configuration database.
-* Update the build procedure to create .cc and .h files from the .msg file
- during the build process. (Requires that the message compiler is built
- first.)
-
-
-log4cxx Issues
-==============
-
-Second Argument in Logger Constructor
--------------------------------------
-As noted above, when using log4cxx as the underlying implementation, the
-argument to the logger's constructor should be set true if declaring the
-logger within a method and set false (or omitted) if declaring the logger
-external to an execution unit.
-
-This is due to an apparent bug in the underlying log4cxx, where the deletion
-of a statically-declared object at program termination can cause a
-segmentation fault. (The destruction of internal memory structures can
-sometimes happen out of order.) By default the Logger class creates the
-structures in its constructor but does not delete them in its destructor.
-The default behaviour works because instead of the program reclaiming memory
-at run-down, the operating system reclaims it when the process exits.
-
-Setting the second argument "true" causes the Logger's destructor to delete
-the log4cxx structures. This does not cause a problem if the program is
-not terminating. So use the second form when declaring an automatic instance
-of isc::log::Logger on the stack.
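-
-In short (a sketch):
-
-  // File scope: destroyed only at program termination, so let the operating
-  // system reclaim the log4cxx structures (second argument omitted).
-  isc::log::Logger file_scope_logger("auth");
-
-  void handleQuery() {
-      // Automatic (stack) instance: destroyed while the program is still
-      // running, so ask the destructor to clean up (second argument true).
-      isc::log::Logger local_logger("auth", true);
-      // ...
-  }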
-
-Building with log4cxx
----------------------
-Owing to issues with versions of log4cxx on different systems, log4cxx was
-temporarily disabled. To use log4cxx on your system:
-
-* Uncomment the log4cxx lines in configure.ac
-* In src/lib/log, replace the logger_impl.{cc,h} files with their log4cxx
- equivalents, i.e.
-
- cp logger_impl_log4cxx.h logger_impl.h
- cp logger_impl_log4cxx.cc logger_impl.cc
-
-* In src/lib/log/Makefile.am, uncomment the lines:
-
- # AM_CPPFLAGS += $(LOG4CXX_INCLUDES)
-
- # liblog_la_SOURCES += xdebuglevel.cc xdebuglevel.h
-
- # liblog_la_LDFLAGS = $(LOG4CXX_LDFLAGS)
-
-* In src/lib/log/test, re-enable testing of the log4cxx implementation
- class, i.e.
-
- cp logger_impl_log4cxx_unittest.cc logger_impl_unittest.cc
-
- ... and uncomment the following lines in Makefile.am:
-
- # run_unittests_SOURCES += logger_impl_unittest.cc
-
- # run_unittests_SOURCES += xdebuglevel_unittest.cc
-
-Then rebuild the system from scratch.
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index d8f08bb..ba62ee9 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -44,7 +44,7 @@ class ConfigManagerData:
"""This class hold the actual configuration information, and
reads it from and writes it to persistent storage"""
- def __init__(self, data_path, file_name = "b10-config.db"):
+ def __init__(self, data_path, file_name="b10-config.db"):
"""Initialize the data for the configuration manager, and
set the version and path for the data store. Initializing
this does not yet read the database, a call to
@@ -62,7 +62,7 @@ class ConfigManagerData:
self.db_filename = data_path + os.sep + file_name
self.data_path = data_path
- def read_from_file(data_path, file_name = "b10-config.db"):
+ def read_from_file(data_path, file_name):
"""Read the current configuration found in the file file_name.
If file_name is absolute, data_path is ignored. Otherwise
we look for the file_name in data_path directory.
@@ -153,7 +153,8 @@ class ConfigManagerData:
class ConfigManager:
"""Creates a configuration manager. The data_path is the path
- to the directory containing the b10-config.db file.
+ to the directory containing the configuration file,
+ database_filename points to the configuration file.
If session is set, this will be used as the communication
channel session. If not, a new session will be created.
The ability to specify a custom session is for testing purposes
@@ -162,7 +163,8 @@ class ConfigManager:
session=None):
"""Initialize the configuration manager. The data_path string
is the path to the directory where the configuration is
- stored (in <data_path>/b10-config.db). The dabase_filename
+ stored (in <data_path>/<database_filename> or in
+ <database_filename>, if it is absolute). The database_filename
is the config file to load. Session is an optional
cc-channel session. If this is not given, a new one is
created."""
@@ -237,8 +239,7 @@ class ConfigManager:
return commands
def read_config(self):
- """Read the current configuration from the b10-config.db file
- at the path specificied at init()"""
+ """Read the current configuration from the file specified at init()"""
try:
self.config = ConfigManagerData.read_from_file(self.data_path,
self.\
@@ -249,8 +250,7 @@ class ConfigManager:
self.database_filename)
def write_config(self):
- """Write the current configuration to the b10-config.db file
- at the path specificied at init()"""
+ """Write the current configuration to the file specified at init()"""
self.config.write_to_file()
def _handle_get_module_spec(self, cmd):
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 0ed3e2c..40d9af0 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -49,10 +49,10 @@ class TestConfigManagerData(unittest.TestCase):
self.writable_data_path + os.sep + "b10-config.db")
def test_read_from_file(self):
- ConfigManagerData.read_from_file(self.writable_data_path)
+ ConfigManagerData.read_from_file(self.writable_data_path, "b10-config.db")
self.assertRaises(ConfigManagerDataEmpty,
ConfigManagerData.read_from_file,
- "doesnotexist")
+ "doesnotexist", "b10-config.db")
self.assertRaises(ConfigManagerDataReadError,
ConfigManagerData.read_from_file,
self.data_path, "b10-config-bad1.db")
@@ -105,11 +105,13 @@ class TestConfigManager(unittest.TestCase):
Test that data_path and database filename are passed through to
underlying ConfigManagerData.
"""
- cm = ConfigManager("/data/path", "filename", self.fake_session)
- self.assertEqual("/data/path/filename", cm.config.db_filename)
+ cm = ConfigManager("datapath", "filename", self.fake_session)
+ self.assertEqual("datapath" + os.sep + "filename",
+ cm.config.db_filename)
# It should preserve it while reading
cm.read_config()
- self.assertEqual("/data/path/filename", cm.config.db_filename)
+ self.assertEqual("datapath" + os.sep + "filename",
+ cm.config.db_filename)
def test_init(self):
self.assert_(self.cm.module_specs == {})
diff --git a/src/lib/server_common/tests/Makefile.am b/src/lib/server_common/tests/Makefile.am
index 22fe104..560398f 100644
--- a/src/lib/server_common/tests/Makefile.am
+++ b/src/lib/server_common/tests/Makefile.am
@@ -33,6 +33,9 @@ run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
run_unittests_LDADD = $(GTEST_LDADD)
run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
endif
noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/testutils/portconfig.h b/src/lib/testutils/portconfig.h
index 486b256..8e61ffc 100644
--- a/src/lib/testutils/portconfig.h
+++ b/src/lib/testutils/portconfig.h
@@ -51,8 +51,8 @@ listenAddresses(Server& server) {
// Try putting there some addresses
AddressList addresses;
- addresses.push_back(AddressPair("127.0.0.1", 5321));
- addresses.push_back(AddressPair("::1", 5321));
+ addresses.push_back(AddressPair("127.0.0.1", 53210));
+ addresses.push_back(AddressPair("::1", 53210));
server.setListenAddresses(addresses);
EXPECT_EQ(2, server.getListenAddresses().size());
EXPECT_EQ("::1", server.getListenAddresses()[1].first);
@@ -85,7 +85,7 @@ listenAddressConfig(Server& server) {
"\"listen_on\": ["
" {"
" \"address\": \"127.0.0.1\","
- " \"port\": 5321"
+ " \"port\": 53210"
" }"
"]"
"}"));
@@ -93,7 +93,7 @@ listenAddressConfig(Server& server) {
EXPECT_EQ(result->toWire(), isc::config::createAnswer()->toWire());
ASSERT_EQ(1, server.getListenAddresses().size());
EXPECT_EQ("127.0.0.1", server.getListenAddresses()[0].first);
- EXPECT_EQ(5321, server.getListenAddresses()[0].second);
+ EXPECT_EQ(53210, server.getListenAddresses()[0].second);
// As this is example address, the machine should not have it on
// any interface
@@ -101,7 +101,7 @@ listenAddressConfig(Server& server) {
"\"listen_on\": ["
" {"
" \"address\": \"192.0.2.0\","
- " \"port\": 5321"
+ " \"port\": 53210"
" }"
"]"
"}");
@@ -109,7 +109,7 @@ listenAddressConfig(Server& server) {
EXPECT_FALSE(result->equals(*isc::config::createAnswer()));
ASSERT_EQ(1, server.getListenAddresses().size());
EXPECT_EQ("127.0.0.1", server.getListenAddresses()[0].first);
- EXPECT_EQ(5321, server.getListenAddresses()[0].second);
+ EXPECT_EQ(53210, server.getListenAddresses()[0].second);
}
diff --git a/src/lib/testutils/srv_test.cc b/src/lib/testutils/srv_test.cc
index c0d6e0f..4fec4ca 100644
--- a/src/lib/testutils/srv_test.cc
+++ b/src/lib/testutils/srv_test.cc
@@ -60,7 +60,7 @@ SrvTestBase::createDataFromFile(const char* const datafile,
delete endpoint;
endpoint = IOEndpoint::create(protocol,
- IOAddress(DEFAULT_REMOTE_ADDRESS), 5300);
+ IOAddress(DEFAULT_REMOTE_ADDRESS), 53210);
UnitTestUtil::readWireData(datafile, data);
io_sock = (protocol == IPPROTO_UDP) ? &IOSocket::getDummyUDPSocket() :
&IOSocket::getDummyTCPSocket();
@@ -76,7 +76,7 @@ SrvTestBase::createRequestPacket(Message& message,
delete io_message;
endpoint = IOEndpoint::create(protocol,
- IOAddress(DEFAULT_REMOTE_ADDRESS), 5300);
+ IOAddress(DEFAULT_REMOTE_ADDRESS), 53210);
io_sock = (protocol == IPPROTO_UDP) ? &IOSocket::getDummyUDPSocket() :
&IOSocket::getDummyTCPSocket();
io_message = new IOMessage(request_renderer.getData(),
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..d4008c0
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = system
diff --git a/tests/system/Makefile.am b/tests/system/Makefile.am
new file mode 100644
index 0000000..663258b
--- /dev/null
+++ b/tests/system/Makefile.am
@@ -0,0 +1,16 @@
+systest:
+ sh $(srcdir)/runall.sh
+
+distclean-local:
+ sh $(srcdir)/cleanall.sh
+
+# Most of the files under this directory (including test subdirectories)
+# must be listed in EXTRA_DIST.
+EXTRA_DIST = README cleanall.sh ifconfig.sh start.pl stop.pl run.sh runall.sh
+EXTRA_DIST += common/default_user.csv
+EXTRA_DIST += glue/auth.good glue/example.good glue/noglue.good glue/test.good
+EXTRA_DIST += glue/tests.sh glue/clean.sh
+EXTRA_DIST += glue/nsx1/com.db glue/nsx1/net.db glue/nsx1/root-servers.nil.db
+EXTRA_DIST += glue/nsx1/root.db
+EXTRA_DIST += bindctl/tests.sh bindctl/clean.sh bindctl/setup.sh
+EXTRA_DIST += bindctl/nsx1/root.db bindctl/nsx1/example-normalized.db
diff --git a/tests/system/README b/tests/system/README
new file mode 100644
index 0000000..a43d49e
--- /dev/null
+++ b/tests/system/README
@@ -0,0 +1,63 @@
+Copyright (C) 2004, 2010, 2011 Internet Systems Consortium, Inc. ("ISC")
+Copyright (C) 2000, 2001 Internet Software Consortium.
+See COPYRIGHT in the source root or http://isc.org/copyright.html for terms.
+
+This is a simple test environment for running BIND 10 system tests
+involving multiple name servers. It was originally developed for BIND
+9, and has been ported to test BIND 10 implementations. Ideally we
+should share the same framework for both versions, so some part of
+the original setup are kept, even though they are BIND 9 specific and
+not currently used.
+
+Also, these tests generally rely on BIND 9 programs, most commonly its
+dig, and sometimes its name server (named). So, the test
+environment assumes that there's a source tree of BIND 9 where its
+programs are built, and that an environment variable "BIND9_TOP" is
+set to point to the top directory of the source tree.
+
+There are multiple test suites, each in a separate subdirectory and
+involving a different DNS setup. They are:
+
+ bindctl/ Some basic management operations using the bindctl tool
+ glue/ Glue handling tests
+(the following tests are planned to be added soon)
+ dnssec/ DNSSEC tests
+ masterfile/ Master file parser
+ xfer/ Zone transfer tests
+
+Typically each test suite sets up 2-5 instances of BIND 10 (or BIND 9
+named) and then performs one or more tests against them. Within the
+test suite subdirectory, each instance has a separate subdirectory
+containing its configuration data. By convention, these
+subdirectories are named "nsx1", "nsx2", etc for BIND 10 ("x" means
+BIND 10), and "ns1", "ns2", etc. for BIND 9.
+
+The tests are completely self-contained and do not require access to
+the real DNS. Generally, one of the test servers (ns[x]1) is set up
+as a root name server and is listed in the hints file of the others.
+
+To enable all servers to run on the same machine, they bind to
+separate virtual IP addresses on the loopback interface. ns[x]1 runs on
+10.53.0.1, ns[x]2 on 10.53.0.2, etc. Before running any tests, you
+must set up these addresses by running "ifconfig.sh up" as root.
+
+Mac OS X:
+If you wish to make the interfaces survive across reboots,
+copy org.isc.bind.system and org.isc.bind.system.plist to
+/Library/LaunchDaemons then run
+"launchctl load /Library/LaunchDaemons/org.isc.bind.system.plist" as
+root.
+
+The servers use port 53210 instead of the usual port 53, so they can be
+run without root privileges once the interfaces have been set up.
+
+The tests can be run individually like this:
+
+ sh run.sh xfer
+ sh run.sh glue
+ etc.
+
+To run all the tests, just type "make systest" either in this directory
+or in the top source directory. Note: currently these tests cannot be
+run when built under a separate build directory. Everything must be
+run within the original source tree.
diff --git a/tests/system/bindctl/clean.sh b/tests/system/bindctl/clean.sh
new file mode 100755
index 0000000..f691512
--- /dev/null
+++ b/tests/system/bindctl/clean.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+rm -f */b10-config.db
+rm -f dig.out.* bindctl.out.*
+rm -f */msgq_socket */zone.sqlite3
diff --git a/tests/system/bindctl/nsx1/b10-config.db.template.in b/tests/system/bindctl/nsx1/b10-config.db.template.in
new file mode 100644
index 0000000..162329a
--- /dev/null
+++ b/tests/system/bindctl/nsx1/b10-config.db.template.in
@@ -0,0 +1,10 @@
+{"version": 2,
+ "Auth": {
+ "listen_on": [{"address": "10.53.0.1", "port": 53210}],
+ "database_file": "@abs_builddir@/zone.sqlite3",
+ "statistics-interval": 1
+ },
+ "Xfrout": {
+ "log_file": "@abs_builddir@/Xfrout.log"
+ }
+}
diff --git a/tests/system/bindctl/nsx1/example-normalized.db b/tests/system/bindctl/nsx1/example-normalized.db
new file mode 100644
index 0000000..7129522
--- /dev/null
+++ b/tests/system/bindctl/nsx1/example-normalized.db
@@ -0,0 +1,3 @@
+com. 300 IN SOA postmaster.example. ns.example.com. 2000042100 600 600 1200 600
+com. 300 IN NS ns.example.com.
+ns.example.com. 300 IN A 192.0.2.2
diff --git a/tests/system/bindctl/nsx1/root.db b/tests/system/bindctl/nsx1/root.db
new file mode 100644
index 0000000..31293de
--- /dev/null
+++ b/tests/system/bindctl/nsx1/root.db
@@ -0,0 +1,25 @@
+; Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$TTL 300
+. IN SOA postmaster.example. a.root.servers.nil. (
+ 2000042100 ; serial
+ 600 ; refresh
+ 600 ; retry
+ 1200 ; expire
+ 600 ; minimum
+ )
+. NS ns.example.com.
+ns.example.com. A 192.0.2.1
diff --git a/tests/system/bindctl/setup.sh b/tests/system/bindctl/setup.sh
new file mode 100755
index 0000000..55afcc1
--- /dev/null
+++ b/tests/system/bindctl/setup.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+SYSTEMTESTTOP=..
+. $SYSTEMTESTTOP/conf.sh
+
+SUBTEST_TOP=${TEST_TOP}/bindctl
+
+cp ${SUBTEST_TOP}/nsx1/b10-config.db.template ${SUBTEST_TOP}/nsx1/b10-config.db
+
+rm -f ${SUBTEST_TOP}/*/zone.sqlite3
+${B10_LOADZONE} -o . -d ${SUBTEST_TOP}/nsx1/zone.sqlite3 \
+ ${SUBTEST_TOP}//nsx1/root.db
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
new file mode 100755
index 0000000..354f349
--- /dev/null
+++ b/tests/system/bindctl/tests.sh
@@ -0,0 +1,106 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+SYSTEMTESTTOP=..
+. $SYSTEMTESTTOP/conf.sh
+
+#
+# Do bindctl tests.
+#
+
+status=0
+n=0
+
+echo "I:Checking b10-auth is working by default ($n)"
+$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
+# perform a simple check on the output (digcomp would be too much for this)
+grep 192.0.2.1 dig.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Checking BIND 10 statistics after a pause ($n)"
+# wait for 2sec to make sure b10-stats gets the latest statistics.
+# note that we set statistics-interval to 1.
+sleep 2
+echo 'Stats show
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
+# the server should have received 1 UDP and 1 TCP query (the TCP query was
+# sent from the server startup script)
+grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
+grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Stopping b10-auth and checking that ($n)"
+echo 'config set Boss/start_auth false
+config commit
+quit
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
+# dig should exit with a failure code.
+$DIG +tcp +norec @10.53.0.1 -p 53210 ns.example.com. A && status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Restarting b10-auth and checking that ($n)"
+echo 'config set Boss/start_auth true
+config commit
+quit
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
+$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
+grep 192.0.2.1 dig.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Rechecking BIND 10 statistics after a pause ($n)"
+sleep 2
+echo 'Stats show
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
+# The statistics counters should have been reset across the stop/start.
+grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
+grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Changing the data source from sqlite3 to in-memory ($n)"
+DATASRC_SPEC='[{"type": "memory", "zones": [{"origin": "com","file":'
+DATASRC_SPEC="${DATASRC_SPEC} \"${TEST_TOP}/bindctl/nsx1/example-normalized.db\"}]}]"
+echo "config set Auth/datasources ${DATASRC_SPEC}
+config commit
+quit
+" | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
+$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
+grep 192.0.2.2 dig.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Rechecking BIND 10 statistics after changing the datasource ($n)"
+sleep 2
+echo 'Stats show
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
+# The statistics counters shouldn't be reset due to hot-swapping datasource.
+grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
+grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
new file mode 100755
index 0000000..17c3d4a
--- /dev/null
+++ b/tests/system/cleanall.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Clean up after system tests.
+#
+
+find . -type f \( \
+ -name 'K*' -o -name '*~' -o -name '*.core' -o -name '*.log' \
+ -o -name '*.pid' -o -name '*.keyset' -o -name named.run \
+ -o -name bind10.run -o -name lwresd.run -o -name ans.run \) -print | \
+ xargs rm -f
+
+status=0
+
+for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+do
+ test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
+done
diff --git a/tests/system/common/default_user.csv b/tests/system/common/default_user.csv
new file mode 100644
index 0000000..e13e194
--- /dev/null
+++ b/tests/system/common/default_user.csv
@@ -0,0 +1 @@
+root,bind10
diff --git a/tests/system/conf.sh.in b/tests/system/conf.sh.in
new file mode 100755
index 0000000..66aa3f5
--- /dev/null
+++ b/tests/system/conf.sh.in
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Copyright (C) 2004-2011 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000-2003 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Common configuration data for system tests, to be sourced into
+# other shell scripts.
+#
+
+# Prerequisite check
+if [ @srcdir@ != @builddir@ ]; then
+ echo "Currently systest doesn't work for a separate build tree."
+ echo "Rebuild BIND 10 on the source tree and run the tests."
+ exit 1
+fi
+
+if [ -z $BIND9_TOP ]; then
+ echo "systest assumes there's a compiled tree of BIND 9 which can be"
+ echo "accessed via the BIND9_TOP environment variable."
+ echo "Please make sure this assumption is met."
+ exit 1
+fi
+
+# Find the top of the source and test trees.
+TOP=@abs_top_srcdir@
+TEST_TOP=@abs_builddir@
+
+RUN_BIND10=$TOP/src/bin/bind10/run_bind10.sh
+RUN_BINDCTL=$TOP/src/bin/bindctl/run_bindctl.sh
+BINDCTL_CSV_DIR=@abs_srcdir@/common/
+B10_LOADZONE=$TOP/src/bin/loadzone/run_loadzone.sh
+BIND9_NAMED=$BIND9_TOP/bin/named/named
+DIG=$BIND9_TOP/bin/dig/dig
+# Test tools borrowed from BIND 9's system test (without change).
+TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
+DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
+
+SUBDIRS="bindctl glue"
+#SUBDIRS="dnssec masterfile xfer"
+
+# PERL will be an empty string if no perl interpreter was found.
+PERL=@PERL@
+
+export RUN_BIND10 BIND9_NAMED DIG SUBDIRS PERL TESTSOCK
diff --git a/tests/system/glue/auth.good b/tests/system/glue/auth.good
new file mode 100644
index 0000000..2c619f6
--- /dev/null
+++ b/tests/system/glue/auth.good
@@ -0,0 +1,15 @@
+
+; <<>> DiG 9.0 <<>> +norec @10.53.0.1 -p 5300 foo.bar.example.org. a
+;; global options: printcmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 41239
+;; flags: qr ad; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 1
+
+;; QUESTION SECTION:
+;foo.bar.example.org. IN A
+
+;; AUTHORITY SECTION:
+example.org. 172800 IN NS b.root-servers.nil.
+
+;; ADDITIONAL SECTION:
+b.root-servers.nil. 300 IN A 10.53.0.2
diff --git a/tests/system/glue/clean.sh b/tests/system/glue/clean.sh
new file mode 100755
index 0000000..b2c1e02
--- /dev/null
+++ b/tests/system/glue/clean.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Clean up after glue tests.
+#
+
+rm -f dig.out.*
+rm -f */msgq_socket */zone.sqlite3
diff --git a/tests/system/glue/example.good b/tests/system/glue/example.good
new file mode 100644
index 0000000..3b7bbb8
--- /dev/null
+++ b/tests/system/glue/example.good
@@ -0,0 +1,19 @@
+
+; <<>> DiG 9.0 <<>> +norec @10.53.0.1 -p 5300 foo.bar.example. A
+;; global options: printcmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 58772
+;; flags: qr ad; QUERY: 1, ANSWER: 0, AUTHORITY: 6, ADDITIONAL: 7
+
+;; QUESTION SECTION:
+;foo.bar.example. IN A
+
+;; AUTHORITY SECTION:
+example. 172800 IN NS NS1.example.COM.
+example. 172800 IN NS NS.example.
+
+;; ADDITIONAL SECTION:
+NS.example. 172800 IN A 192.0.2.1
+NS.example. 172800 IN A 192.0.2.2
+NS1.example.COM. 172800 IN A 192.0.2.101
+NS1.example.COM. 172800 IN AAAA 2001:db8::1
diff --git a/tests/system/glue/noglue.good b/tests/system/glue/noglue.good
new file mode 100644
index 0000000..57a2211
--- /dev/null
+++ b/tests/system/glue/noglue.good
@@ -0,0 +1,14 @@
+
+; <<>> DiG 9.0 <<>> @10.53.0.1 -p 5300 example.net a
+;; global options: printcmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 29409
+;; flags: qr rd ad; QUERY: 1, ANSWER: 0, AUTHORITY: 2, ADDITIONAL: 0
+
+;; QUESTION SECTION:
+;example.net. IN A
+
+;; AUTHORITY SECTION:
+example.net. 300 IN NS ns2.example.info.
+example.net. 300 IN NS ns1.example.info.
+
diff --git a/tests/system/glue/nsx1/b10-config.db.in b/tests/system/glue/nsx1/b10-config.db.in
new file mode 100644
index 0000000..acd040c
--- /dev/null
+++ b/tests/system/glue/nsx1/b10-config.db.in
@@ -0,0 +1,9 @@
+{"version": 2,
+ "Auth": {
+ "listen_on": [{"address": "10.53.0.1", "port": 53210}],
+ "database_file": "@abs_builddir@/zone.sqlite3"
+ },
+ "Xfrout": {
+ "log_file": "@abs_builddir@/Xfrout.log"
+ }
+}
diff --git a/tests/system/glue/nsx1/com.db b/tests/system/glue/nsx1/com.db
new file mode 100644
index 0000000..c4b94e1
--- /dev/null
+++ b/tests/system/glue/nsx1/com.db
@@ -0,0 +1,31 @@
+; Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN com.
+$TTL 300
+@ IN SOA root.example.com. a.root.servers.nil. (
+ 2000042100 ; serial
+ 600 ; refresh
+ 600 ; retry
+ 1200 ; expire
+ 600 ; minimum
+ )
+@ NS a.root-servers.nil.
+
+example.com. NS ns1.example.com.
+example.com. NS ns2.example.com.
+ns1.example.com. 172800 IN A 192.0.2.101
+ns1.example.com. 172800 IN AAAA 2001:db8::1
+ns2.example.com. 172800 IN A 192.0.2.102
diff --git a/tests/system/glue/nsx1/net.db b/tests/system/glue/nsx1/net.db
new file mode 100644
index 0000000..8b66521
--- /dev/null
+++ b/tests/system/glue/nsx1/net.db
@@ -0,0 +1,32 @@
+; Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN net.
+$TTL 300
+@ IN SOA root.example.net. a.root.servers.nil. (
+ 2000042100 ; serial
+ 600 ; refresh
+ 600 ; retry
+ 1200 ; expire
+ 600 ; minimum
+ )
+@ NS a.root-servers.nil.
+
+; Referral outside of server authority, but with glue records present.
+; Don't hand out the glue.
+example.net. NS ns1.example.info.
+example.net. NS ns2.example.info.
+ns1.example.info. 172800 IN A 192.0.2.101
+ns2.example.info. 172800 IN A 192.0.2.102
diff --git a/tests/system/glue/nsx1/root-servers.nil.db b/tests/system/glue/nsx1/root-servers.nil.db
new file mode 100644
index 0000000..45050a9
--- /dev/null
+++ b/tests/system/glue/nsx1/root-servers.nil.db
@@ -0,0 +1,26 @@
+; Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$TTL 300
+@ IN SOA ns hostmaster (
+ 1
+ 3600
+ 1800
+ 1814400
+ 3600
+ )
+ NS a
+a A 10.53.0.1
+b A 10.53.0.2
diff --git a/tests/system/glue/nsx1/root.db b/tests/system/glue/nsx1/root.db
new file mode 100644
index 0000000..e43f2d2
--- /dev/null
+++ b/tests/system/glue/nsx1/root.db
@@ -0,0 +1,55 @@
+; Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+; Copyright (C) 2000, 2001 Internet Software Consortium.
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$TTL 300
+. IN SOA postmaster.example. a.root.servers.nil. (
+ 2000042100 ; serial
+ 600 ; refresh
+ 600 ; retry
+ 1200 ; expire
+ 600 ; minimum
+ )
+. NS a.root-servers.nil.
+
+root-servers.nil. NS a.root-servers.nil.
+a.root-servers.nil. A 10.53.0.1
+
+; Delegate some domains that contain name servers for the sample
+; ccTLDs below.
+com. 172800 IN NS a.root-servers.nil.
+
+;
+; A sample TLD
+;
+example. 172800 IN NS NS.example.
+example. 172800 IN NS NS1.example.COM.
+NS.example. 172800 IN A 192.0.2.1
+NS.example. 172800 IN A 192.0.2.2
+; this "glue" is below a zone cut for com. BIND 9 still uses it for
+; the delegation to example. BIND 10 (with sqlite3 data source) doesn't.
+NS1.example.COM. 172800 IN A 192.0.2.3
+
+;
+;
+;
+test. 172800 IN NS ns.test.
+test. 172800 IN NS ns1.example.net.
+ns.test. 172800 IN A 192.0.2.200
+ns1.example.net. 172800 IN A 192.0.2.201
+
+;
+; A hypothetical ccTLD where we are authoritative for the NS glue.
+;
+example.org 172800 IN NS b.root-servers.nil.
diff --git a/tests/system/glue/setup.sh.in b/tests/system/glue/setup.sh.in
new file mode 100755
index 0000000..dc5b28a
--- /dev/null
+++ b/tests/system/glue/setup.sh.in
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+SYSTEMTESTTOP=..
+. $SYSTEMTESTTOP/conf.sh
+
+rm -f */zone.sqlite3
+${B10_LOADZONE} -o . -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/root.db
+${B10_LOADZONE} -o root-servers.nil -d @builddir@/nsx1/zone.sqlite3 \
+ @builddir@/nsx1/root-servers.nil.db
+${B10_LOADZONE} -o com -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/com.db
+${B10_LOADZONE} -o net -d @builddir@/nsx1/zone.sqlite3 @builddir@/nsx1/net.db
diff --git a/tests/system/glue/test.good b/tests/system/glue/test.good
new file mode 100644
index 0000000..b9b4719
--- /dev/null
+++ b/tests/system/glue/test.good
@@ -0,0 +1,19 @@
+
+; <<>> DiG 9.8.0 <<>> @127.0.0.1 -p 5300 foo.bar.test
+; (1 server found)
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 55069
+;; flags: qr rd; QUERY: 1, ANSWER: 0, AUTHORITY: 2, ADDITIONAL: 2
+;; WARNING: recursion requested but not available
+
+;; QUESTION SECTION:
+;foo.bar.test. IN A
+
+;; AUTHORITY SECTION:
+test. 172800 IN NS ns.test.
+test. 172800 IN NS ns1.example.net.
+
+;; ADDITIONAL SECTION:
+ns.test. 172800 IN A 192.0.2.200
+ns1.example.net. 172800 IN A 192.0.2.201
diff --git a/tests/system/glue/tests.sh b/tests/system/glue/tests.sh
new file mode 100755
index 0000000..50b2330
--- /dev/null
+++ b/tests/system/glue/tests.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001, 2003 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+SYSTEMTESTTOP=..
+. $SYSTEMTESTTOP/conf.sh
+
+#
+# Do glue tests.
+#
+
+status=0
+n=0
+
+# This query should result in a delegation with two NS; one in the delegated
+# zone and one in a so-called out-of-bailiwick zone for which the auth server
+# has authority, too. For the former, the server should return glue in the
+# parent zone. For the latter, BIND 9 and BIND 10 behave differently; BIND 9
+# uses "glue" in the parent zone (since this is the root zone everything can
+# be considered a valid glue). BIND 10 (using sqlite3 data source) searches
+# the other zone and uses the authoritative data in that zone (which is
+# intentionally different from the glue in the root zone).
+echo "I:testing that a TLD referral gets a full glue set from the root zone ($n)"
+$DIG +norec @10.53.0.1 -p 53210 foo.bar.example. A >dig.out.$n || status=1
+$PERL $DIGCOMP example.good dig.out.$n || status=1
+n=`expr $n + 1`
+
+echo "I:testing that we find glue A RRs we are authoritative for ($n)"
+$DIG +norec @10.53.0.1 -p 53210 foo.bar.example.org. a >dig.out.$n || status=1
+$PERL $DIGCOMP auth.good dig.out.$n || status=1
+n=`expr $n + 1`
+
+# We cannot do this test for BIND 10 because b10-auth doesn't act as a
+# recursive (caching) server (by design)
+# echo "I:testing that we find glue A/AAAA RRs in the cache ($n)"
+# $DIG +norec @10.53.0.1 -p 53210 foo.bar.yy. a >dig.out.$n || status=1
+# $PERL $DIGCOMP yy.good dig.out.$n || status=1
+# n=`expr $n + 1`
+
+echo "I:testing that we don't find out-of-zone glue ($n)"
+$DIG +norec @10.53.0.1 -p 53210 example.net. a > dig.out.$n || status=1
+$PERL $DIGCOMP noglue.good dig.out.$n || status=1
+n=`expr $n + 1`
+
+# This test currently fails (additional section will be empty, which is
+# incorrect). See Trac ticket #646.
+#echo "I:testing that we are finding partial out-of-zone glue ($n)"
+#$DIG +norec @10.53.0.1 -p 53210 foo.bar.test. a >dig.out.$n || status=1
+#$PERL $DIGCOMP test.good dig.out.$n || status=1
+#n=`expr $n + 1`
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/ifconfig.sh b/tests/system/ifconfig.sh
new file mode 100755
index 0000000..c0c365a
--- /dev/null
+++ b/tests/system/ifconfig.sh
@@ -0,0 +1,226 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007-2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000-2003 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Set up interface aliases for bind9 system tests.
+#
+# IPv4: 10.53.0.{1..7} RFC 1918
+# IPv6: fd92:7065:b8e:ffff::{1..7} ULA
+#
+
+config_guess=""
+for f in ./config.guess ../../config.guess
+do
+ if test -f $f
+ then
+ config_guess=$f
+ fi
+done
+
+if test "X$config_guess" = "X"
+then
+ cat <<EOF >&2
+$0: must be run from the top level source directory or the
+bin/tests/system directory
+EOF
+ exit 1
+fi
+
+# If running on hp-ux, don't even try to run config.guess.
+# It will try to create a temporary file in the current directory,
+# which fails when running as root with the current directory
+# on a NFS mounted disk.
+
+case `uname -a` in
+ *HP-UX*) sys=hpux ;;
+ *) sys=`sh $config_guess` ;;
+esac
+
+case "$2" in
+[0-9]|[1-9][0-9]|[1-9][0-9][0-9]) base=$2;;
+*) base=""
+esac
+
+case "$3" in
+[0-9]|[1-9][0-9]|[1-9][0-9][0-9]) base6=$3;;
+*) base6=""
+esac
+
+case "$1" in
+
+ start|up)
+ for ns in 1 2 3 4 5 6 7
+ do
+ if test -n "$base"
+ then
+ int=`expr $ns + $base - 1`
+ else
+ int=$ns
+ fi
+ if test -n "$base6"
+ then
+ int6=`expr $ns + $base6 - 1`
+ else
+ int6=$ns
+ fi
+ case "$sys" in
+ *-pc-solaris2.5.1)
+ ifconfig lo0:$int 10.53.0.$ns netmask 0xffffffff up
+ ;;
+ *-sun-solaris2.[6-7])
+ ifconfig lo0:$int 10.53.0.$ns netmask 0xffffffff up
+ ;;
+ *-*-solaris2.[8-9]|*-*-solaris2.1[0-9])
+ /sbin/ifconfig lo0:$int plumb
+ /sbin/ifconfig lo0:$int 10.53.0.$ns up
+ if test -n "$int6"
+ then
+ /sbin/ifconfig lo0:$int6 inet6 plumb
+ /sbin/ifconfig lo0:$int6 \
+ inet6 fd92:7065:b8e:ffff::$ns up
+ fi
+ ;;
+ *-*-linux*)
+ ifconfig lo:$int 10.53.0.$ns up netmask 255.255.255.0
+ ifconfig lo inet6 add fd92:7065:b8e:ffff::$ns/64
+ ;;
+ *-unknown-freebsd*)
+ ifconfig lo0 10.53.0.$ns alias netmask 0xffffffff
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns alias
+ ;;
+ *-unknown-netbsd*)
+ ifconfig lo0 10.53.0.$ns alias netmask 255.255.255.0
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns alias
+ ;;
+ *-unknown-openbsd*)
+ ifconfig lo0 10.53.0.$ns alias netmask 255.255.255.0
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns alias
+ ;;
+ *-*-bsdi[3-5].*)
+ ifconfig lo0 add 10.53.0.$ns netmask 255.255.255.0
+ ;;
+ *-dec-osf[4-5].*)
+ ifconfig lo0 alias 10.53.0.$ns
+ ;;
+ *-sgi-irix6.*)
+ ifconfig lo0 alias 10.53.0.$ns
+ ;;
+ *-*-sysv5uw7*|*-*-sysv*UnixWare*|*-*-sysv*OpenUNIX*)
+ ifconfig lo0 10.53.0.$ns alias netmask 0xffffffff
+ ;;
+ *-ibm-aix4.*|*-ibm-aix5.*)
+ ifconfig lo0 alias 10.53.0.$ns
+ ifconfig lo0 inet6 alias -dad fd92:7065:b8e:ffff::$ns/64
+ ;;
+ hpux)
+ ifconfig lo0:$int 10.53.0.$ns netmask 255.255.255.0 up
+ ifconfig lo0:$int inet6 fd92:7065:b8e:ffff::$ns up
+ ;;
+ *-sco3.2v*)
+ ifconfig lo0 alias 10.53.0.$ns
+ ;;
+ *-darwin*)
+ ifconfig lo0 alias 10.53.0.$ns
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns alias
+ ;;
+ *)
+ echo "Don't know how to set up interface. Giving up."
+ exit 1
+ esac
+ done
+ ;;
+
+ stop|down)
+ for ns in 7 6 5 4 3 2 1
+ do
+ if test -n "$base"
+ then
+ int=`expr $ns + $base - 1`
+ else
+ int=$ns
+ fi
+ case "$sys" in
+ *-pc-solaris2.5.1)
+ ifconfig lo0:$int 0.0.0.0 down
+ ;;
+ *-sun-solaris2.[6-7])
+ ifconfig lo0:$int 10.53.0.$ns down
+ ;;
+ *-*-solaris2.[8-9]|*-*-solaris2.1[0-9])
+ ifconfig lo0:$int 10.53.0.$ns down
+ ifconfig lo0:$int 10.53.0.$ns unplumb
+ if test -n "$int6"
+ then
+ ifconfig lo0:$int6 inet6 down
+ ifconfig lo0:$int6 inet6 unplumb
+ fi
+ ;;
+ *-*-linux*)
+ ifconfig lo:$int 10.53.0.$ns down
+ ifconfig lo inet6 del fd92:7065:b8e:ffff::$ns/64
+ ;;
+ *-unknown-freebsd*)
+ ifconfig lo0 10.53.0.$ns delete
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns delete
+ ;;
+ *-unknown-netbsd*)
+ ifconfig lo0 10.53.0.$ns delete
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns delete
+ ;;
+ *-unknown-openbsd*)
+ ifconfig lo0 10.53.0.$ns delete
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns delete
+ ;;
+ *-*-bsdi[3-5].*)
+ ifconfig lo0 remove 10.53.0.$ns
+ ;;
+ *-dec-osf[4-5].*)
+ ifconfig lo0 -alias 10.53.0.$ns
+ ;;
+ *-sgi-irix6.*)
+ ifconfig lo0 -alias 10.53.0.$ns
+ ;;
+ *-*-sysv5uw7*|*-*-sysv*UnixWare*|*-*-sysv*OpenUNIX*)
+ ifconfig lo0 -alias 10.53.0.$ns
+ ;;
+ *-ibm-aix4.*|*-ibm-aix5.*)
+ ifconfig lo0 delete 10.53.0.$ns
+ ifconfig lo0 delete inet6 fd92:7065:b8e:ffff::$ns/64
+ ;;
+ hpux)
+ ifconfig lo0:$int 0.0.0.0
+ ifconfig lo0:$int inet6 ::
+ ;;
+ *-sco3.2v*)
+ ifconfig lo0 -alias 10.53.0.$ns
+ ;;
+ *darwin*)
+ ifconfig lo0 -alias 10.53.0.$ns
+ ifconfig lo0 inet6 fd92:7065:b8e:ffff::$ns delete
+ ;;
+ *)
+ echo "Don't know how to destroy interface. Giving up."
+ exit 1
+ esac
+ done
+
+ ;;
+
+ *)
+ echo "Usage: $0 { up | down } [base]"
+ exit 1
+esac
diff --git a/tests/system/run.sh b/tests/system/run.sh
new file mode 100755
index 0000000..4f852f4
--- /dev/null
+++ b/tests/system/run.sh
@@ -0,0 +1,125 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Run a system test.
+#
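+#
+# For example (illustrative only; the test name must match a subdirectory
+# of tests/system):
+#   sh run.sh --keep some-test    # run one test, leave its servers running
+#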
+
+SYSTEMTESTTOP=.
+. $SYSTEMTESTTOP/conf.sh
+
+stopservers=true
+
+case $1 in
+ --keep) stopservers=false; shift ;;
+esac
+
+test $# -gt 0 || { echo "usage: $0 [--keep] test-directory" >&2; exit 1; }
+
+test=$1
+shift
+
+test -d $test || { echo "$0: $test: no such test" >&2; exit 1; }
+
+echo "S:$test:`date`" >&2
+echo "T:$test:1:A" >&2
+echo "A:System test $test" >&2
+
+if [ x$PERL = x ]
+then
+ echo "I:Perl not available. Skipping test." >&2
+ echo "R:UNTESTED" >&2
+ echo "E:$test:`date`" >&2
+ exit 0;
+fi
+
+$PERL $TESTSOCK || {
+ echo "I:Network interface aliases not set up. Skipping test." >&2;
+ echo "R:UNTESTED" >&2;
+ echo "E:$test:`date`" >&2;
+ exit 0;
+}
+
+
+# Check for test-specific prerequisites.
+test ! -f $test/prereq.sh || ( cd $test && sh prereq.sh "$@" )
+result=$?
+
+if [ $result -eq 0 ]; then
+ : prereqs ok
+else
+ echo "I:Prerequisites for $test missing, skipping test." >&2
+ [ $result -eq 255 ] && echo "R:SKIPPED" || echo "R:UNTESTED"
+ echo "E:$test:`date`" >&2
+ exit 0
+fi
+
+# Check for PKCS#11 support
+if
+ test ! -f $test/usepkcs11 || sh cleanpkcs11.sh
+then
+ : pkcs11 ok
+else
+ echo "I:Need PKCS#11 for $test, skipping test." >&2
+ echo "R:PKCS11ONLY" >&2
+ echo "E:$test:`date`" >&2
+ exit 0
+fi
+
+# Set up any dynamically generated test data
+if test -f $test/setup.sh
+then
+ ( cd $test && sh setup.sh "$@" )
+fi
+
+# Start name servers running
+$PERL start.pl $test || exit 1
+
+# Run the tests
+( cd $test ; sh tests.sh )
+
+status=$?
+
+if $stopservers
+then
+ :
+else
+ exit $status
+fi
+
+# Shutdown
+$PERL stop.pl $test
+
+status=`expr $status + $?`
+
+if [ $status != 0 ]; then
+ echo "R:FAIL"
+ # Don't clean up - we need the evidence.
+ find . -name core -exec chmod 0644 '{}' \;
+else
+ echo "R:PASS"
+
+ # Clean up.
+ if test -f $test/clean.sh
+ then
+ ( cd $test && sh clean.sh "$@" )
+ fi
+fi
+
+echo "E:$test:`date`"
+
+exit $status
diff --git a/tests/system/runall.sh b/tests/system/runall.sh
new file mode 100755
index 0000000..5d0fe9b
--- /dev/null
+++ b/tests/system/runall.sh
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Run all the system tests.
+#
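+#
+# This simply iterates over the $SUBDIRS list from conf.sh, invoking
+# run.sh for each test directory; invoked with no arguments, e.g.:
+#   sh runall.sh
+#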
+
+SYSTEMTESTTOP=.
+. $SYSTEMTESTTOP/conf.sh
+
+status=0
+
+for d in $SUBDIRS
+do
+ sh run.sh $d || status=1
+done
+
+$PERL $TESTSOCK || {
+ cat <<EOF >&2
+I:
+I:NOTE: Many of the tests were skipped because they require that
+I: the IP addresses 10.53.0.1 through 10.53.0.7 are configured
+I: as alias addresses on the loopback interface. Please run
+I: "tests/system/ifconfig.sh up" as root to configure them
+I: and rerun the tests.
+EOF
+ exit 0;
+}
+
+exit $status
diff --git a/tests/system/start.pl b/tests/system/start.pl
new file mode 100755
index 0000000..56f00c4
--- /dev/null
+++ b/tests/system/start.pl
@@ -0,0 +1,226 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2004-2008, 2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Framework for starting test servers.
+# Based on the type of server specified, check for port availability, remove
+# temporary files, start the server, and verify that the server is running.
+# If a server is specified, start it. Otherwise, start all servers for the test.
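+# Server directories are recognized by name: "nsx*" starts bind10 via
+# $RUN_BIND10, "ns*" starts named, "lwresd*" starts lwresd, and "ans*"
+# runs a perl answer script (ans.pl).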
+
+use strict;
+use Cwd 'abs_path';
+use Getopt::Long;
+
+# Option handling
+# --noclean test [server [options]]
+#
+# --noclean - Do not cleanup files in server directory
+# test - name of the test directory
+# server - name of the server directory
+# options - alternate options for the server
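+#
+# For example (names are illustrative only):
+#   perl start.pl --noclean some-test ns1 "-c named.conf -d 99 -g"
+# starts ns1 of "some-test" with alternate named options and keeps any
+# files left over from a previous run.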
+
+my $usage = "usage: $0 [--noclean] test-directory [server-directory [server-options]]";
+my $noclean;
+GetOptions('noclean' => \$noclean);
+my $test = $ARGV[0];
+my $server = $ARGV[1];
+my $options = $ARGV[2];
+
+if (!$test) {
+ die "$usage\n";
+}
+if (!-d $test) {
+ die "No test directory: \"$test\"\n";
+}
+if ($server && !-d "$test/$server") {
+ die "No server directory: \"$test/$server\"\n";
+}
+
+# Global variables
+my $topdir = abs_path("$test/..");
+my $testdir = abs_path("$test");
+my $RUN_BIND10 = $ENV{'RUN_BIND10'};
+my $NAMED = $ENV{'NAMED'};
+my $LWRESD = $ENV{'LWRESD'};
+my $DIG = $ENV{'DIG'};
+my $PERL = $ENV{'PERL'};
+my $TESTSOCK = $ENV{'TESTSOCK'};
+
+# Start the server(s)
+
+if ($server) {
+ if ($server =~ /^ns/) {
+ &check_ports($server);
+ }
+ &start_server($server, $options);
+ if ($server =~ /^ns/) {
+ &verify_server($server);
+ }
+} else {
+ # Determine which servers need to be started for this test.
+ opendir DIR, $testdir or die "$testdir: $!\n";
+ my @files = sort readdir DIR;
+ closedir DIR;
+
+ my @ns = grep /^nsx?[0-9]*$/, @files;
+ my @lwresd = grep /^lwresd[0-9]*$/, @files;
+ my @ans = grep /^ans[0-9]*$/, @files;
+
+ # Start the servers we found.
+ &check_ports();
+ foreach my $s (@ns, @lwresd, @ans) {
+ &start_server($s);
+ }
+ foreach my $s (@ns) {
+ &verify_server($s);
+ }
+}
+
+# Subroutines
+
+sub check_ports {
+ my $server = shift;
+ my $options = "";
+
+ if ($server && $server =~ /(\d+)$/) {
+ $options = "-i $1";
+ }
+
+ my $tries = 0;
+ while (1) {
+ my $return = system("$PERL $TESTSOCK -p 53210 $options");
+ last if ($return == 0);
+ if (++$tries > 4) {
+ print "$0: could not bind to server addresses, still running?\n";
+ print "I:server sockets not available\n";
+ print "R:FAIL\n";
+ system("$PERL $topdir/stop.pl $testdir"); # Is this the correct behavior?
+ exit 1;
+ }
+ print "I:Couldn't bind to socket (yet)\n";
+ sleep 2;
+ }
+}
+
+sub start_server {
+ my $server = shift;
+ my $options = shift;
+
+ my $cleanup_files;
+ my $command;
+ my $pid_file;
+
+ if ($server =~ /^nsx/) {
+ $cleanup_files = "{bind10.run}";
+ $command = "B10_FROM_SOURCE_LOCALSTATEDIR=$testdir/$server/ ";
+ $command .= "$RUN_BIND10 ";
+ if ($options) {
+ $command .= "$options";
+ } else {
+ $command .= "--msgq-socket-file=$testdir/$server/msgq_socket ";
+ $command .= "--pid-file=$testdir/$server/bind10.pid ";
+ $command .= "-v";
+ }
+ $command .= " >bind10.run 2>&1 &";
+ $pid_file = "bind10.pid";
+ } elsif ($server =~ /^ns/) {
+ $cleanup_files = "{*.jnl,*.bk,*.st,named.run}";
+ $command = "$NAMED ";
+ if ($options) {
+ $command .= "$options";
+ } else {
+ $command .= "-m record,size,mctx ";
+ $command .= "-T clienttest ";
+ $command .= "-T nosoa "
+ if (-e "$testdir/$server/named.nosoa");
+ $command .= "-T noaa "
+ if (-e "$testdir/$server/named.noaa");
+ $command .= "-c named.conf -d 99 -g";
+ }
+ $command .= " >named.run 2>&1 &";
+ $pid_file = "named.pid";
+ } elsif ($server =~ /^lwresd/) {
+ $cleanup_files = "{lwresd.run}";
+ $command = "$LWRESD ";
+ if ($options) {
+ $command .= "$options";
+ } else {
+ $command .= "-m record,size,mctx ";
+ $command .= "-T clienttest ";
+ $command .= "-C resolv.conf -d 99 -g ";
+ $command .= "-i lwresd.pid -P 9210 -p 53210";
+ }
+ $command .= " >lwresd.run 2>&1 &";
+ $pid_file = "lwresd.pid";
+ } elsif ($server =~ /^ans/) {
+ $cleanup_files = "{ans.run}";
+ $command = "$PERL ./ans.pl ";
+ if ($options) {
+ $command .= "$options";
+ } else {
+ $command .= "";
+ }
+ $command .= " >ans.run 2>&1 &";
+ $pid_file = "ans.pid";
+ } else {
+ print "I:Unknown server type $server\n";
+ print "R:FAIL\n";
+ system "$PERL $topdir/stop.pl $testdir";
+ exit 1;
+ }
+
+ # print "I:starting server $server\n";
+
+ chdir "$testdir/$server";
+
+ unless ($noclean) {
+ unlink glob $cleanup_files;
+ }
+
+ system "$command";
+
+ my $tries = 0;
+ while (!-f $pid_file) {
+ if (++$tries > 14) {
+ print "I:Couldn't start server $server\n";
+ print "R:FAIL\n";
+ system "$PERL $topdir/stop.pl $testdir";
+ exit 1;
+ }
+ sleep 1;
+ }
+}
+
+sub verify_server {
+ my $server = shift;
+ my $n = $server;
+ $n =~ s/^nsx?//;
+
+ my $tries = 0;
+ while (1) {
+ my $return = system("$DIG +tcp +noadd +nosea +nostat +noquest +nocomm +nocmd -p 53210 version.bind. chaos txt \@10.53.0.$n > dig.out");
+ last if ($return == 0);
+ print `grep ";" dig.out`;
+ if (++$tries >= 30) {
+ print "I:no response from $server\n";
+ print "R:FAIL\n";
+ system("$PERL $topdir/stop.pl $testdir");
+ exit 1;
+ }
+ sleep 2;
+ }
+ unlink "dig.out";
+}
diff --git a/tests/system/stop.pl b/tests/system/stop.pl
new file mode 100755
index 0000000..a803f52
--- /dev/null
+++ b/tests/system/stop.pl
@@ -0,0 +1,188 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2004-2007 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2001 Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Framework for stopping test servers
+# Based on the type of server specified, signal the server to stop, wait
+# briefly for it to die, and then kill it if it is still alive.
+# If a server is specified, stop it. Otherwise, stop all servers for the test.
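+# Servers are stopped in up to three passes: "rndc stop" (only with
+# --use-rndc, for named servers), then SIGTERM, then SIGABRT for anything
+# that is still running.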
+
+use strict;
+use Cwd 'abs_path';
+
+# Option handling
+# [--use-rndc] test [server]
+#
+# test - name of the test directory
+# server - name of the server directory
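+#
+# For example (names are illustrative only):
+#   perl stop.pl --use-rndc some-test    # try "rndc stop" first, then signals
+#   perl stop.pl some-test ns2           # stop a single server with signals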
+
+my $usage = "usage: $0 [--use-rndc] test-directory [server-directory]";
+my $use_rndc;
+
+while (@ARGV && $ARGV[0] =~ /^-/) {
+ my $opt = shift @ARGV;
+ if ($opt eq '--use-rndc') {
+ $use_rndc = 1;
+ } else {
+ die "$usage\n";
+ }
+}
+
+my $test = $ARGV[0];
+my $server = $ARGV[1];
+
+my $errors = 0;
+
+die "$usage\n" unless defined($test);
+die "No test directory: \"$test\"\n" unless (-d $test);
+die "No server directory: \"$server\"\n" if (defined($server) && !-d "$test/$server");
+
+# Global variables
+my $testdir = abs_path($test);
+my @servers;
+
+
+# Determine which servers need to be stopped.
+if (defined $server) {
+ @servers = ($server);
+} else {
+ local *DIR;
+ opendir DIR, $testdir or die "$testdir: $!\n";
+ my @files = sort readdir DIR;
+ closedir DIR;
+
+ my @ns = grep /^nsx?[0-9]*$/, @files;
+ my @lwresd = grep /^lwresd[0-9]*$/, @files;
+ my @ans = grep /^ans[0-9]*$/, @files;
+
+ push @servers, @ns, @lwresd, @ans;
+}
+
+
+# Stop the server(s), pass 1: rndc.
+if ($use_rndc) {
+ foreach my $server (grep /^ns/, @servers) {
+ stop_rndc($server);
+ }
+
+ wait_for_servers(30, grep /^ns/, @servers);
+}
+
+
+# Pass 2: SIGTERM
+foreach my $server (@servers) {
+ stop_signal($server, "TERM");
+}
+
+wait_for_servers(60, @servers);
+
+# Pass 3: SIGABRT
+foreach my $server (@servers) {
+ stop_signal($server, "ABRT");
+}
+
+exit($errors ? 1 : 0);
+
+# Subroutines
+
+# Return the full path to a given server's PID file.
+sub server_pid_file {
+ my($server) = @_;
+
+ my $pid_file;
+ if ($server =~ /^nsx/) {
+ $pid_file = "bind10.pid";
+ } elsif ($server =~ /^ns/) {
+ $pid_file = "named.pid";
+ } elsif ($server =~ /^lwresd/) {
+ $pid_file = "lwresd.pid";
+ } elsif ($server =~ /^ans/) {
+ $pid_file = "ans.pid";
+ } else {
+ print "I:Unknown server type $server\n";
+ exit 1;
+ }
+ $pid_file = "$testdir/$server/$pid_file";
+}
+
+# Read a PID.
+sub read_pid {
+ my($pid_file) = @_;
+
+ local *FH;
+ my $result = open FH, "< $pid_file";
+ if (!$result) {
+ print "I:$pid_file: $!\n";
+ unlink $pid_file;
+ return;
+ }
+
+ my $pid = <FH>;
+ chomp($pid);
+ return $pid;
+}
+
+# Stop a named process with rndc.
+sub stop_rndc {
+ my($server) = @_;
+
+ return unless ($server =~ /^ns(\d+)$/);
+ my $ip = "10.53.0.$1";
+
+ # Ugly, but should work.
+ system("$ENV{RNDC} -c $testdir/../common/rndc.conf -s $ip -p 9953 stop | sed 's/^/I:$server /'");
+ return;
+}
+
+# Stop a server by sending a signal to it.
+sub stop_signal {
+ my($server, $sig) = @_;
+
+ my $pid_file = server_pid_file($server);
+ return unless -f $pid_file;
+
+ my $pid = read_pid($pid_file);
+ return unless defined($pid);
+
+ if ($sig eq 'ABRT') {
+ print "I:$server didn't die when sent a SIGTERM\n";
+ $errors++;
+ }
+
+ my $result = kill $sig, $pid;
+ if (!$result) {
+ print "I:$server died before a SIG$sig was sent\n";
+ unlink $pid_file;
+ $errors++;
+ }
+
+ return;
+}
+
+sub wait_for_servers {
+ my($timeout, @servers) = @_;
+
+ my @pid_files = grep { defined($_) }
+ map { server_pid_file($_) } @servers;
+
+ while ($timeout > 0 && @pid_files > 0) {
+ @pid_files = grep { -f $_ } @pid_files;
+ sleep 1 if (@pid_files > 0);
+ $timeout--;
+ }
+
+ return;
+}