BIND 10 trac2673, updated. 3e367ff100f7d1b76ec8743385917ba747b9ad34 [2673] Merge branch 'master' into trac2673
BIND 10 source code commits
bind10-changes at lists.isc.org
Mon Feb 11 14:42:45 UTC 2013
The branch, trac2673 has been updated
via 3e367ff100f7d1b76ec8743385917ba747b9ad34 (commit)
via 5aa5b4e403893b1de767cea00b4a3f9d9a17422e (commit)
via dd2dbeb5a4507ba50ce1547972ab4ea1b56b4655 (commit)
via 7782afcefe47162534a377769d9eda2c0fa960ff (commit)
via 660a0d164feaf055677f375977f7ed327ead893e (commit)
via 9b16b116c909bedfc1353147f3948ba19f42fb5a (commit)
via 1d0c2004865d1bf322bf78d13630d992e39179fd (commit)
via b4de3233542a0c98c04c0cf730bd8222efe897b7 (commit)
via 1a80b1dd71902c7942e11316b53c6cba4a16565d (commit)
via 59c744cf4838c919abe8763501208e02aff9526d (commit)
via b5e2be95d21ed750ad7cf5e15de2058aa8bc45f4 (commit)
via 1d6a2e3fb2715b445ce835847d7d353886495fea (commit)
via a12aed4bde955f0edb68717ee23895bbc78baccf (commit)
via ac75b8db7bb9c0b104672d0987d98ec8055c698c (commit)
via 3fa52fbaed9589ecad689ccf105bbf7365d26d62 (commit)
via 55f8f410621028a556a3b0af8d2d41bc0f60b08e (commit)
via 4d6818eb58a726d0abd92a171a0f18334a9eb3b7 (commit)
via 530f569e47b06f49402611bda07e1956cdf04a24 (commit)
via da67c0642c9403f08e278e2424bc7bfde74e034a (commit)
via 6f83737a9b9deaacd5ce0799cbda9e18fdb81c4b (commit)
via 4c439a4cca7768510b4549c73e0f43120d4c9739 (commit)
via fbf11f41c327130fbdb39fcf64daa16f278eb197 (commit)
via 733d42fa3d0b0b0f426a4817dcd022c764158d0d (commit)
via e5005185351cf73d4a611407c2cfcd163f80e428 (commit)
via bfefbfda28cb512b12643555790149b7c64414f3 (commit)
via 4d074c3e7048f8dde151e078bee4967949d3b32e (commit)
via f7a26a8f9ee4adf64d754e6c2a6c07977854c40c (commit)
via 89fbc1a1f41da33150176d8d0ba83ae8e88a03da (commit)
via 3b03a16056601b26d27db3a4cd0baced7e4ba756 (commit)
via 564f4b8990e4759f57033f4fc9de2359e3baf829 (commit)
via 005cba1c8d62e2f44ad05b512ac9b7be639da725 (commit)
via d85b2e22c5c45050d3191ee73c508bbd3cd1251f (commit)
via 64fb39c963cd9e6494f71dfe14d9dabdf869fdc8 (commit)
via 3a185f59245200be3c6b2e86340ac1c2ae464efb (commit)
via b43c93c8cb4e0256677c01d5f649093fc27c998a (commit)
via ffd4a283b18f8eaa453712e1e615e2440b12aa0d (commit)
via 0749f9e194505698031990eb7c544e8ec076fe10 (commit)
via 39c1d353784c56ac2f1c42836348393b7d80303e (commit)
via 2a1d32f1610c1b99a2b6bcfdf350fcf123c51e19 (commit)
via d76159997442d71928d459041d46d89a01fbdefc (commit)
via ebeb7923963456d7f62721327290b75572ab4279 (commit)
via aa4dcd59d930af330b7f082c40a395d0bc424d97 (commit)
via 8df1853f98c6fdfbdc186062426904d047259d53 (commit)
via 936279ca980c2c2b5b711f147703665f722ffd9c (commit)
via 6dae0d1f016190f74098fc92cc5200bdc9ea165f (commit)
via 37743e4ec13cbb7e4b2864ce54e3d9732a1e8101 (commit)
via 8b2f7c325534431cb4f6cca82c5d314583e03248 (commit)
via 06fa3ca48f34c5bb6c235a1fbda2cf00be69c2c8 (commit)
via 2baf0ceb67f23351cd20ac77978b29366f7c5638 (commit)
via 6ded04bbd0fb963754e4123cee99cb07de07a618 (commit)
via bae3798603affdb276f370c1ac6b33b011a5ed4f (commit)
via 6dc113cc0e20a4781ad0f991871966d244371440 (commit)
via 28c7d972110b86833c31631d563b07a2824efbab (commit)
via 3c200a3decdd5bbc30bbdb9c81f9f206eda98ad9 (commit)
via 3a8e933c07b9493fd5deb97d86433532c68cce8e (commit)
via 87da92a958f4eb914c981d0b3ddab3cf68d41803 (commit)
via f7a77b8a0b46d3a01fc3a31a303e029da1c5f6b3 (commit)
via ee5c66336b0baf8424d5be04670dd5b96344e07e (commit)
via 6eb06132ab8aa9e1e4c6b73a233952b4bf23baff (commit)
via 6b09c6be32b221b73a05f98de7814c650597883a (commit)
via d1ae5b15754d5d359c52fd6f58e23e0e0907d4f3 (commit)
via 3188ee1246e7e1b52b334cfa6edb04d2123ea759 (commit)
via 076dad2aada1624b0b632e4eee3e6a1907c51a16 (commit)
via cb95ae3f178ce497a8dce0aad4173a8586dc9ca2 (commit)
via f07dbb59715b26afeef7ff682bba887187ad169b (commit)
via b1f09a967614c9668bedc3d877bb12f1622222f1 (commit)
via db90d30b97219e365c3643fd6c79878eaf1ac518 (commit)
via 9a7c7bf6690bf3a6d85b5f3dc802d819825b3955 (commit)
via 8b84c4beb47bf82e9c8302c39b2144a4717f7d1b (commit)
via 2dffde2cef71ec693097f59f81fe40ada8035975 (commit)
via 19a4de9fd0738a4332169f432035fd4734311f53 (commit)
via 8404133f0a362cede4819322da6506b83968e79b (commit)
via 10d63236169056c2c14cf4807d05017599c0a3bd (commit)
via f95627b501f5bf720e74de1d2cdb17219c826f02 (commit)
via 2eeab2ebf0b7e9de2a7c73e553572dbc313d06a7 (commit)
via 8eb3dbe543fa39f7702a48422777df9752dd8afb (commit)
via 74110729288f3014cfde862c943bb68bc1fceb69 (commit)
via 4c56f4c81c03d04a1d2b8be3250ef1bea9979ef4 (commit)
via ddfbc7fa1ca5fc712e16801f5f24807f90d8e88b (commit)
via fbb78097f3aca9bd8fbf4cce1f1c7169719d04e5 (commit)
via a85aff6a94f4f8e08bf3beaaf4da6e282b28e253 (commit)
via 476b3eff5488d89b03d7ac34830ad52973e9b0bd (commit)
via 94df9fad7ddbe91fff09b601cfaf07fcfd346659 (commit)
via 5a0d055137287f81e23fbeedd35236fee274596d (commit)
via a01569277cda3f78b1171bbf79f15ecf502e81e2 (commit)
via 6aa1c3cc7dbb55d32304ada9b88037346ba929b0 (commit)
via bd93846c0d74b780b2e6a6547686c9bc2db6fe23 (commit)
via 971ac6698f44e468f072fab7baaea5eb6f6b77a3 (commit)
via 7d086e721bf8e5b081aeed8a95d9cf517f3cf7d8 (commit)
via acd8358522b4ac20a75684b6ec616269bcc705f2 (commit)
via 23610f5bf4d613f503793bf7c8526c67f95df223 (commit)
via 1c50c5a6ee7e9675e3ab154f2c7f975ef519fca2 (commit)
via b6b57fb469c8a0483c1050ec64dc46b6dfb1b40e (commit)
via 709b6c520e3e86c880655ded5cbe375d29f80aa9 (commit)
via 677e990f61d47065da92899bd3c82115cd977c8c (commit)
via 878c361337c79e35816bb7df20595b8a5faa3491 (commit)
via 0a6fc5a8a77ac5f0e579dbb57da5ea5eb1923c39 (commit)
via 95b8519b040545420529f7dee5944826f6cc1224 (commit)
via 826ac1b1e637a6aa8b5763c4b810755ac0551446 (commit)
via cfa26ab8df9b533d32b8dd5aa405bf237bb58f12 (commit)
via 266bfcd67adafe326c13c6d835f5875628972d9f (commit)
via 666395baa9b73aca0312787b683a7d3a96e7ca86 (commit)
via 8431fb8b25cde01d16bfdefdc52b2eb2b07cb756 (commit)
via ac23914ba629242fa651b4d68b61e9e5380c0643 (commit)
via 2a1e4152da4fb42c78e57ada20de7e6a4d64da51 (commit)
via 4df79169011293abac9c08a1cc47e4f16f5e1750 (commit)
via 73ca6b06d15e426c0d57c41a0e126d7433acc598 (commit)
via 271e37c6b0befddbf9291577460868f0c67ea428 (commit)
via f59c311cfd135589af52c6f8a9afa45c03f84318 (commit)
via 814eb8ad33d8f85621d3daacd4c64c4d7e3dc43d (commit)
via 11f5ae7cddca20ae003331cd69b817be4aea790c (commit)
via fe0db0adf0796432ace4794c157efc2bfb79e008 (commit)
via 269cd1867284c4ebf44ebabacadc99c6413bcacb (commit)
via ed8243603f26e46d8ae48f0fb4bac7545c5ff4c9 (commit)
via 583de5551d162a5feb89bf5759e573b32d80f142 (commit)
via bc42c4b3d31ed960581808d76e7150e0fc3eb1d9 (commit)
via 10833250f2751b41b6d9e9b86be5396ec3b4e062 (commit)
via 691f87a3076fba4d5ce60a0960c1c07a35055e05 (commit)
via d8c50d0baef0ad3a1339b1c03bd0d37a32719d86 (commit)
via 7e110b499a751ca43a473a94eb4448bdbe78a32a (commit)
via 2db190194445d191e1d1eb75f0fc1936d0a791df (commit)
via 17a9c25450a3129fa5787a5deb70d371a602f311 (commit)
via c5efaec3f9196e4b4cabccde691f30ad9de380ba (commit)
via ae25fcc7abecb79eb3d4b9df1e0b463c27f77258 (commit)
via 58ccf7aa110fe656c52060cbc7b2cc26b4a9ae9d (commit)
via 0aafc9b4325734e8d8d16805ad459f8dde153fdb (commit)
via af2672cf1515f459dfc5db30919c407abcb8273b (commit)
via e4cd5386850b79a824320a63bfed69f03358574f (commit)
via 7ddf561d6304d62ce755344b0e28ab9b025b9f27 (commit)
via e6603386c50335787bca7443a9716414cf68c7bc (commit)
via 5d1563f963952af222030a404b21a0bc8171e2ac (commit)
via 859b69891fe1fc3be67bbd293397fbe7b989eb26 (commit)
via 3ca5de640a863aa8b84e5d34b241a26a63992e11 (commit)
via ff930eca8ab894a76eb0a695ebde98a7bf117d45 (commit)
via 3d35bcb14cbe4eba0cf2a2605bad4ede5ae6c338 (commit)
via e8c5250c5b3924c34159c92b2e3a23b8878572e6 (commit)
via b8e8e7ed5ff301eb18db71ff83c46479984f5fde (commit)
via 9c42816e5f405de02ef30728d82fcb045cf6a803 (commit)
via f18d297fd5b1651ebf61d5d72ca0d64b92fece30 (commit)
via a5a7f330f48cf6a63e7f67d690ea3487c11c2cce (commit)
via e1ed220b645ea62b01d9a467f8724be4fd59a696 (commit)
via adb9e821ff8fa75c3ecc1ecc853ff3e90d1edfd7 (commit)
via 463700b34d94d933cc4244480e46f975c798d777 (commit)
via f575d8fc0316d69b2ea54788b43948488dface2e (commit)
via 0479cb528e77e29f67b3a05ed79a83c2ef483455 (commit)
via d985d0dde44a6c4b090cf211c295ab10a65924e1 (commit)
via 4f0716782cba2f94c577e80cc23e4759b2440907 (commit)
via 56e9d0e542d1396e9d0f373bb8cc1fd5d0a945fe (commit)
via 3f74193c572fefc2e718ca0952962494a9680ddb (commit)
via ba7573eb62566aece58b514dfcb3b5a322adfed1 (commit)
via 83b06fb184c17f70a28ab307347e9eb075ee048d (commit)
via 1ec1ffb481e68fdd06d04fa38aaf6b4348669649 (commit)
via 6ad900eeff1c9e2c704dd5259565c28b8846aa37 (commit)
via 62bb1c4ddcb01d285edfdb04016d710597c708e7 (commit)
via 521189be3a1dc89fc9010dfa9fa13a31ee233a38 (commit)
via 1e2111996aa44a583fcdf0997a6aa170542f3ea2 (commit)
via fc03d665a777c47920a85bae41f3e359c61fa42e (commit)
via 2a9656fb9ac69e87273f14f4a6807986ee9739a8 (commit)
via 3d1ea31fe2461f28b7337c78623834eb987c73a5 (commit)
via 388c202a9b3c95f3d8d1aa0c76803b1f398974f3 (commit)
via 368ceac8bfb49a0bd4c0a0c98fe624d623de9c6d (commit)
from 1eef52e751a97720ba679256b24b2c90ed98b5fd (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 3e367ff100f7d1b76ec8743385917ba747b9ad34
Merge: 1eef52e 5aa5b4e
Author: Stephen Morris <stephen at isc.org>
Date: Mon Feb 11 14:39:34 2013 +0000
[2673] Merge branch 'master' into trac2673
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 54 +
configure.ac | 31 +-
doc/guide/bind10-guide.xml | 1097 +++++++++++++++++---
examples/configure.ac | 5 +-
examples/m4/ax_isc_rpath.m4 | 62 +-
src/bin/auth/auth_config.cc | 2 +-
src/bin/auth/auth_messages.mes | 9 +-
src/bin/auth/auth_srv.cc | 6 +-
src/bin/auth/b10-auth.xml | 6 +-
src/bin/auth/main.cc | 34 +-
src/bin/auth/tests/config_unittest.cc | 2 +-
src/bin/bind10/.gitignore | 3 +-
src/bin/bind10/Makefile.am | 32 +-
src/bin/bind10/README | 5 +-
src/bin/bind10/{bind10.xml => b10-init.xml} | 74 +-
src/bin/bind10/bind10.in | 11 +
src/bin/bind10/bind10.xml | 461 +-------
src/bin/bind10/creatorapi.txt | 28 +-
src/bin/bind10/{bind10_src.py.in => init.py.in} | 95 +-
src/bin/bind10/{bob.spec => init.spec} | 6 +-
.../{bind10_messages.mes => init_messages.mes} | 110 +-
src/bin/bind10/run_bind10.sh.in | 2 +-
src/bin/bind10/tests/Makefile.am | 2 +-
src/bin/bind10/tests/args_test.py | 100 +-
.../tests/{bind10_test.py.in => init_test.py.in} | 1078 +++++++++----------
src/bin/bindctl/bindcmd.py | 2 +-
src/bin/bindctl/bindctl.xml | 2 +-
src/bin/bindctl/bindctl_main.py.in | 2 +-
src/bin/bindctl/command_sets.py | 24 +-
src/bin/bindctl/run_bindctl.sh.in | 2 +-
src/bin/cfgmgr/b10-cfgmgr.py.in | 2 +-
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in | 8 +-
src/bin/cmdctl/Makefile.am | 17 +-
src/bin/cmdctl/b10-cmdctl.xml | 4 +-
src/bin/cmdctl/cmdctl.py.in | 4 +-
src/bin/cmdctl/cmdctl_messages.mes | 3 +
src/bin/ddns/b10-ddns.xml | 8 +-
src/bin/ddns/ddns.py.in | 8 +-
src/bin/ddns/ddns_messages.mes | 4 +-
src/bin/ddns/tests/ddns_test.py | 34 +-
src/bin/dhcp4/ctrl_dhcp4_srv.h | 2 +-
src/bin/dhcp4/tests/dhcp4_test.py | 2 +-
src/bin/dhcp6/ctrl_dhcp6_srv.h | 2 +-
src/bin/dhcp6/tests/dhcp6_test.py | 4 +-
src/bin/loadzone/loadzone.py.in | 2 +-
src/bin/loadzone/tests/correct/example.db | 14 +-
src/bin/loadzone/tests/correct/include.db | 4 +-
src/bin/loadzone/tests/correct/mix1.db | 4 +-
src/bin/loadzone/tests/correct/mix2.db | 4 +-
src/bin/loadzone/tests/correct/ttl1.db | 4 +-
src/bin/loadzone/tests/correct/ttl2.db | 4 +-
src/bin/loadzone/tests/correct/ttlext.db | 4 +-
src/bin/loadzone/tests/loadzone_test.py | 6 +-
src/bin/msgq/msgq.py.in | 159 ++-
src/bin/msgq/msgq_messages.mes | 74 +-
src/bin/msgq/tests/msgq_test.py | 173 +++
src/bin/resolver/b10-resolver.xml | 6 +-
src/bin/resolver/resolver.cc | 13 +-
src/bin/resolver/resolver_messages.mes | 13 +-
src/bin/sockcreator/tests/sockcreator_tests.cc | 9 +-
src/bin/stats/b10-stats-httpd.xml | 6 +-
src/bin/stats/b10-stats.xml | 10 +-
src/bin/stats/stats.py.in | 11 +-
src/bin/stats/stats.spec | 2 +-
src/bin/stats/stats_httpd.py.in | 10 +-
src/bin/stats/stats_httpd_messages.mes | 17 +-
src/bin/stats/stats_messages.mes | 3 +
src/bin/stats/tests/b10-stats-httpd_test.py | 10 +-
src/bin/stats/tests/b10-stats_test.py | 66 +-
src/bin/stats/tests/test_utils.py | 16 +-
src/bin/stats/tests/testdata/b10-config.db | 2 +-
src/bin/sysinfo/run_sysinfo.sh.in | 14 +-
src/bin/tests/process_rename_test.py.in | 2 +-
src/bin/xfrin/b10-xfrin.xml | 8 +-
src/bin/xfrin/tests/xfrin_test.py | 300 +++---
src/bin/xfrin/xfrin.py.in | 64 +-
src/bin/xfrin/xfrin_messages.mes | 7 +-
src/bin/xfrout/b10-xfrout.xml | 6 +-
src/bin/xfrout/tests/xfrout_test.py.in | 124 +--
src/bin/xfrout/xfrout.py.in | 71 +-
src/bin/xfrout/xfrout_messages.mes | 5 +-
src/bin/zonemgr/b10-zonemgr.xml | 8 +-
src/bin/zonemgr/zonemgr.py.in | 4 +-
src/bin/zonemgr/zonemgr_messages.mes | 4 +-
src/lib/cc/cc_messages.mes | 4 +
src/lib/cc/session.cc | 1 +
src/lib/datasrc/memory/zone_finder.h | 2 +-
src/lib/datasrc/tests/database_unittest.cc | 8 +-
.../tests/memory/rdata_serialization_unittest.cc | 8 +-
.../tests/memory/treenode_rrset_unittest.cc | 20 +-
.../datasrc/tests/memory/zone_finder_unittest.cc | 4 +-
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 2 +-
src/lib/dhcpsrv/lease_mgr.cc | 13 +-
src/lib/dhcpsrv/mysql_lease_mgr.cc | 25 +-
src/lib/dns/Makefile.am | 4 +
src/lib/dns/gen-rdatacode.py.in | 124 ++-
src/lib/dns/python/opcode_python.cc | 140 ---
src/lib/dns/python/pydnspp.cc | 230 ++--
src/lib/dns/python/rcode_python.cc | 146 ---
src/lib/dns/python/rrclass_python.cc | 43 -
src/lib/dns/python/rrtype_python.cc | 144 ---
src/lib/dns/python/tests/edns_python_test.py | 6 +-
src/lib/dns/python/tests/message_python_test.py | 34 +-
.../python/tests/messagerenderer_python_test.py | 8 +-
src/lib/dns/python/tests/nsec3hash_python_test.py | 46 +-
src/lib/dns/python/tests/opcode_python_test.py | 82 +-
src/lib/dns/python/tests/rcode_python_test.py | 60 +-
src/lib/dns/python/tests/rrclass_python_test.py | 20 +-
.../python/tests/rrset_collection_python_test.py | 46 +-
src/lib/dns/python/tests/rrset_python_test.py | 8 +-
src/lib/dns/python/tests/rrtype_python_test.py | 44 +-
src/lib/dns/python/tests/tsig_python_test.py | 36 +-
src/lib/dns/python/tests/tsigerror_python_test.py | 26 +-
.../dns/python/tests/zone_checker_python_test.py | 61 +-
src/lib/dns/rdata/generic/cname_5.cc | 59 +-
src/lib/dns/rdata/generic/dname_39.cc | 59 +-
src/lib/dns/rdata/generic/mx_15.cc | 82 +-
src/lib/dns/rdata/generic/ns_2.cc | 59 +-
src/lib/dns/rdata/generic/ptr_12.cc | 59 +-
src/lib/dns/rdata/in_1/srv_33.cc | 129 ++-
src/lib/dns/rdata/template.h | 4 +
src/lib/dns/rrclass-placeholder.h | 14 -
src/lib/dns/rrtype-placeholder.h | 30 -
src/lib/dns/tests/rdata_cname_unittest.cc | 36 +-
src/lib/dns/tests/rdata_dname_unittest.cc | 36 +-
src/lib/dns/tests/rdata_mx_unittest.cc | 35 +-
src/lib/dns/tests/rdata_ns_unittest.cc | 42 +-
src/lib/dns/tests/rdata_ptr_unittest.cc | 40 +-
src/lib/dns/tests/rdata_srv_unittest.cc | 45 +-
src/lib/dns/tests/rrclass_unittest.cc | 25 +
src/lib/dns/tests/rrset_unittest.cc | 4 +-
src/lib/dns/tests/rrtype_unittest.cc | 53 +
src/lib/dns/tests/zone_checker_unittest.cc | 20 +-
.../tests/nameserver_address_store_unittest.cc | 2 +-
src/lib/nsas/tests/nsas_test.h | 4 +-
src/lib/python/bind10_config.py.in | 6 +-
src/lib/python/isc/__init__.py | 10 +-
src/lib/python/isc/bind10/Makefile.am | 5 +
src/lib/python/isc/bind10/component.py | 57 +-
src/lib/python/isc/bind10/sockcreator.py | 6 +-
src/lib/python/isc/bind10/socket_cache.py | 17 +-
src/lib/python/isc/bind10/special_component.py | 52 +-
src/lib/python/isc/bind10/tests/component_test.py | 39 +-
src/lib/python/isc/cc/Makefile.am | 15 +-
src/lib/python/isc/{server_common => cc}/logger.py | 12 +-
.../lib/python/isc/cc/pycc_messages.mes | 20 +-
src/lib/python/isc/cc/session.py | 5 +
src/lib/python/isc/config/cfgmgr.py | 83 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 13 +
src/lib/python/isc/config/config_data.py | 2 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 54 +-
.../python/isc/datasrc/tests/clientlist_test.py | 10 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 140 +--
.../python/isc/datasrc/tests/zone_loader_test.py | 5 +-
src/lib/python/isc/ddns/libddns_messages.mes | 4 +-
src/lib/python/isc/ddns/session.py | 118 +--
src/lib/python/isc/ddns/tests/session_tests.py | 619 ++++++-----
src/lib/python/isc/ddns/tests/zone_config_tests.py | 14 +-
src/lib/python/isc/log_messages/Makefile.am | 6 +-
src/lib/python/isc/log_messages/bind10_messages.py | 1 -
src/lib/python/isc/log_messages/init_messages.py | 1 +
src/lib/python/isc/log_messages/pycc_messages.py | 1 +
src/lib/python/isc/notify/notify_out.py | 18 +-
src/lib/python/isc/notify/tests/notify_out_test.py | 14 +-
src/lib/python/isc/server_common/dns_tcp.py | 2 +-
.../isc/server_common/server_common_messages.mes | 2 +-
.../python/isc/statistics/tests/counters_test.py | 6 +-
src/lib/python/isc/sysinfo/sysinfo.py | 15 +-
src/lib/python/isc/testutils/rrset_utils.py | 28 +-
src/lib/python/isc/xfrin/diff.py | 10 +-
src/lib/python/isc/xfrin/tests/diff_tests.py | 60 +-
src/lib/resolve/recursive_query.cc | 2 +-
src/lib/resolve/resolve_messages.mes | 2 +-
.../resolve/tests/response_classifier_unittest.cc | 4 +-
src/lib/server_common/portconfig.cc | 2 +-
src/lib/server_common/portconfig.h | 7 +-
src/lib/server_common/socket_request.cc | 36 +-
src/lib/server_common/tests/portconfig_unittest.cc | 4 +-
.../server_common/tests/socket_requestor_test.cc | 8 +-
src/lib/util/unittests/fork.cc | 10 +-
src/lib/util/unittests/fork.h | 4 +-
.../configurations/auth/auth_badzone.config.orig | 4 +-
.../configurations/auth/auth_basic.config.orig | 4 +-
.../configurations/bindctl/bindctl.config.orig | 4 +-
.../configurations/bindctl_commands.config.orig | 4 +-
tests/lettuce/configurations/ddns/ddns.config.orig | 4 +-
.../lettuce/configurations/ddns/noddns.config.orig | 4 +-
tests/lettuce/configurations/default.config | 2 +-
.../lettuce/configurations/example.org.config.orig | 4 +-
.../configurations/example.org.inmem.config | 4 +-
tests/lettuce/configurations/example2.org.config | 4 +-
.../inmemory_over_sqlite3/secondary.conf | 4 +-
.../configurations/ixfr-out/testset1-config.db | 4 +-
.../multi_instance/multi_auth.config.orig | 4 +-
tests/lettuce/configurations/no_db_file.config | 4 +-
.../lettuce/configurations/nsec3/nsec3_auth.config | 4 +-
.../resolver/resolver_basic.config.orig | 32 +-
.../lettuce/configurations/xfrin/inmem_slave.conf | 4 +-
.../xfrin/retransfer_master.conf.orig | 4 +-
.../xfrin/retransfer_master_nons.conf.orig | 4 +-
.../xfrin/retransfer_slave.conf.orig | 4 +-
.../xfrin/retransfer_slave_notify.conf | 4 +-
tests/lettuce/data/commands/bad_command | 6 +-
tests/lettuce/features/bindctl_commands.feature | 22 +-
tests/lettuce/features/ddns_system.feature | 4 +-
tests/lettuce/features/default.feature | 2 +-
tests/lettuce/features/multi_instance.feature | 10 +-
tests/lettuce/features/terrain/bind10_control.py | 20 +-
tests/system/Makefile.am | 2 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/glue/nsx1/b10-config.db.in | 2 +-
tests/system/ixfr/b10-config.db.in | 2 +-
tools/query_cmp/src/lib/handledns.py | 2 +-
213 files changed, 4934 insertions(+), 3757 deletions(-)
copy src/bin/bind10/{bind10.xml => b10-init.xml} (87%)
create mode 100755 src/bin/bind10/bind10.in
rename src/bin/bind10/{bind10_src.py.in => init.py.in} (95%)
rename src/bin/bind10/{bob.spec => init.spec} (94%)
rename src/bin/bind10/{bind10_messages.mes => init_messages.mes} (78%)
rename src/bin/bind10/tests/{bind10_test.py.in => init_test.py.in} (70%)
mode change 100644 => 100755 src/bin/stats/stats_httpd.py.in
copy src/lib/python/isc/{server_common => cc}/logger.py (69%)
copy tests/system/ixfr/clean_ns.sh => src/lib/python/isc/cc/pycc_messages.mes (66%)
delete mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
create mode 100644 src/lib/python/isc/log_messages/init_messages.py
create mode 100644 src/lib/python/isc/log_messages/pycc_messages.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 32a9108..b835d41 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,57 @@
+569. [bug] tomek
+ b10-dhcp4: Fix bug whereby a DHCP packet without a client ID
+ could crash the MySQL lease database backend.
+ (Trac #2697, git b5e2be95d21ed750ad7cf5e15de2058aa8bc45f4)
+
+568. [func] muks
+ Various message IDs have been renamed to remove the word 'ERROR'
+ from them when they are not logged at ERROR severity level.
+ (Trac #2672, git 660a0d164feaf055677f375977f7ed327ead893e)
+
+567. [doc] marcin, stephen, tomek
+ Update DHCP sections of the BIND 10 guide.
+ (Trac #2657, git 1d0c2004865d1bf322bf78d13630d992e39179fd)
+
+566. [func]* jinmei
+ libdns++/Python isc.dns: In Python isc.dns, function style
+ constants for RRType, RRClass, Rcode and Opcode were deprecated
+ and replaced with straightforward object constants, e.g., from
+ RRType.AAAA() to RRType.AAAA. This is a backward incompatible
+ change (see the Trac ticket for a conversion script if needed).
+ Also, these constants are now more consistent between C++
+ and Python, and RRType constants for all currently standardized
+ types are now supported (even if Rdata for these are not yet
+ available).
+ (Trac #1866 and #2409, git e5005185351cf73d4a611407c2cfcd163f80e428)
+
+565. [func]* jelte
+ The main initializer script (formerly known as either 'bind10',
+ 'boss', or 'bob'), has been renamed to b10-init (and Init in
+ configuration). Configuring which components are run is henceforth
+ done through '/Init/components', and the sbin/bind10 script is now
+ simply a shellscript that runs b10-init. Existing configuration is
+ automatically updated. NOTE: once configuration with this update
+ has been saved (by committing any new change with bindctl), you
+ cannot run older versions of BIND 10 anymore with this configuration.
+ (Trac #1901, git bae3798603affdb276f370c1ac6b33b011a5ed4f)
+
+564. [func] muks
+ libdns++: the CNAME, DNAME, MX, NS, PTR and SRV Rdata classes now
+ use the generic lexer in constructors from text. This means that
+ the name fields in such RRs in a zone file can now be non-absolute
+ (the origin name in that context will be used), e.g., when loaded
+ by b10-loadzone. One additional change to the libdns++ API is that
+ the existing string constructors for these Rdata classes also use
+ the generic lexer, and they now expect an absolute name (with the
+ trailing '.') in the name fields.
+ (Trac #2390, git a01569277cda3f78b1171bbf79f15ecf502e81e2)
+ (Trac #2656, git 5a0d055137287f81e23fbeedd35236fee274596d)
+
+563. [build] jinmei
+ Added --disable-rpath configure option to avoid embedding library
+ paths to binaries. Patch from Adam Tkac.
+ (Trac #2667, git 1c50c5a6ee7e9675e3ab154f2c7f975ef519fca2)
+
562. [func]* vorner
The b10-xfrin now performs basic sanity check on just received
zone. It'll reject severely broken zones (such as missing NS
diff --git a/configure.ac b/configure.ac
index 6d1a388..eb6f125 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,9 +2,15 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10, 20121219, bind10-dev at isc.org)
+AC_INIT(bind10, 20130205, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
-AM_INIT_AUTOMAKE([foreign])
+# serial-tests is not available in automake version before 1.13. In
+# automake 1.13 and higher, AM_PROG_INSTALL is undefined, so we'll check
+# that and conditionally use serial-tests.
+AM_INIT_AUTOMAKE(
+ [foreign]
+ m4_ifndef([AM_PROG_INSTALL], [serial-tests])
+)
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])dnl be backward compatible
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4macros])
@@ -232,6 +238,21 @@ AM_CONDITIONAL(SET_ENV_LIBRARY_PATH, test $SET_ENV_LIBRARY_PATH = yes)
AC_SUBST(SET_ENV_LIBRARY_PATH)
AC_SUBST(ENV_LIBRARY_PATH)
+# Our experiments have shown Solaris 10 has broken support for the
+# IPV6_USE_MIN_MTU socket option for getsockopt(); it doesn't return the value
+# previously set via setsockopt(). We know it doesn't happen on one instance
+# on Solaris 11, but we don't know whether it happens for any Solaris 10
+# implementations or for earlier versions of Solaris. In any case, at the
+# moment this matters for only one unittest case, so we'll simply disable
+# the affected test using the following definition with the specific hardcoding
+# of that version of Solaris.
+case "$host" in
+*-solaris2.10)
+ AC_DEFINE([HAVE_BROKEN_GET_IPV6_USE_MIN_MTU], [1],
+ [Define to 1 if getsockopt(IPV6_USE_MIN_MTU) does not work])
+ ;;
+esac
+
m4_define([_AM_PYTHON_INTERPRETER_LIST], [python python3.3 python3.2 python3.1 python3])
AC_ARG_WITH([pythonpath],
AC_HELP_STRING([--with-pythonpath=PATH],
@@ -1127,6 +1148,7 @@ AC_CONFIG_FILES([Makefile
compatcheck/Makefile
src/Makefile
src/bin/Makefile
+ src/bin/bind10/bind10
src/bin/bind10/Makefile
src/bin/bind10/tests/Makefile
src/bin/cmdctl/Makefile
@@ -1310,9 +1332,9 @@ AC_OUTPUT([doc/version.ent
src/bin/sysinfo/run_sysinfo.sh
src/bin/stats/stats.py
src/bin/stats/stats_httpd.py
- src/bin/bind10/bind10_src.py
+ src/bin/bind10/init.py
src/bin/bind10/run_bind10.sh
- src/bin/bind10/tests/bind10_test.py
+ src/bin/bind10/tests/init_test.py
src/bin/bindctl/run_bindctl.sh
src/bin/bindctl/bindctl_main.py
src/bin/bindctl/tests/bindctl_test
@@ -1376,6 +1398,7 @@ AC_OUTPUT([doc/version.ent
chmod +x src/bin/xfrin/run_b10-xfrin.sh
chmod +x src/bin/xfrout/run_b10-xfrout.sh
chmod +x src/bin/zonemgr/run_b10-zonemgr.sh
+ chmod +x src/bin/bind10/bind10
chmod +x src/bin/bind10/run_bind10.sh
chmod +x src/bin/cmdctl/tests/cmdctl_test
chmod +x src/bin/dbutil/run_dbutil.sh
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 37934cd..0d1913f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -7,7 +7,7 @@
]>
<!--
- - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2013 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -347,7 +347,7 @@ share/
share/bind10/
auth.spec
b10-cmdctl.pem
- bob.spec
+ init.spec
passwd.csv
man/
var/
@@ -432,9 +432,9 @@ var/
run): <screen>$ <userinput>bindctl</userinput></screen>
(Login with the provided default username and password.)
<screen>
-> <userinput>config add Boss/components b10-auth</userinput>
-> <userinput>config set Boss/components/b10-auth/special auth</userinput>
-> <userinput>config set Boss/components/b10-auth/kind needed</userinput>
+> <userinput>config add Init/components b10-auth</userinput>
+> <userinput>config set Init/components/b10-auth/special auth</userinput>
+> <userinput>config set Init/components/b10-auth/kind needed</userinput>
> <userinput>config commit</userinput>
> <userinput>quit</userinput>
</screen>
@@ -740,17 +740,13 @@ as a dependency earlier -->
</listitem>
</varlistentry>
- <varlistentry>
- <term>--with-dhcp-mysql</term>
- <listitem>
- <simpara>Enable MySQL support for BIND 10 DHCP. For notes on configuring
- and building DHCP with MySQL see <xref linkend="dhcp-install-configure">.</xref>
- </simpara>
- </listitem>
- </varlistentry>
-
</variablelist>
-
+ <note>
+ <para>
+ For additional instructions concerning the building and installation of
+ BIND 10 DHCP, see <xref linkend="dhcp-install-configure"/>.
+ </para>
+ </note>
</para>
<!-- TODO: lcov -->
@@ -1274,10 +1270,10 @@ TODO
<screen><userinput><module> <command> <replaceable>[argument(s)]</replaceable></userinput></screen>
- For example, the Boss module has a 'shutdown' command to shut down
+ For example, the Init module has a 'shutdown' command to shut down
BIND 10, with an optional argument 'help':
- <screen>> <userinput>Boss shutdown help</userinput>
+ <screen>> <userinput>Init shutdown help</userinput>
Command shutdown (Shut down BIND 10)
help (Get help for command)
This command has no parameters
@@ -1300,12 +1296,12 @@ Available module names:
</screen>
When 'help' is used as a command to a module, it shows the supported commands for the module; for example:
- <screen>> <userinput>Boss help</userinput>
-Module Boss Master process
+ <screen>> <userinput>Init help</userinput>
+Module Init Master process
Available commands:
help Get help for module.
shutdown Shut down BIND 10
- ping Ping the boss process
+ ping Ping the Init process
show_processes
List the running BIND 10 processes
</screen>
@@ -1646,7 +1642,7 @@ Parameters:
to maps.
</simpara>
<simpara>
- For example, the <command>Boss/components</command>
+ For example, the <command>Init/components</command>
elements is a named set;
adding, showing, and then removing an element
can be done with the following three commands (note
@@ -1654,13 +1650,13 @@ Parameters:
'example_module'):
</simpara>
<simpara>
- <command>config add Boss/components example_module</command>
+ <command>config add Init/components example_module</command>
</simpara>
<simpara>
- <command>config show Boss/components/example_module</command>
+ <command>config show Init/components/example_module</command>
</simpara>
<simpara>
- <command>config remove Boss/components example_module</command>
+ <command>config remove Init/components example_module</command>
</simpara>
</listitem>
</varlistentry>
@@ -1708,21 +1704,21 @@ Parameters:
<screen>> <userinput>execute init_authoritative_server show</userinput>
!echo adding Authoritative server component
-config add /Boss/components b10-auth
-config set /Boss/components/b10-auth/kind needed
-config set /Boss/components/b10-auth/special auth
+config add /Init/components b10-auth
+config set /Init/components/b10-auth/kind needed
+config set /Init/components/b10-auth/special auth
!echo adding Xfrin component
-config add /Boss/components b10-xfrin
-config set /Boss/components/b10-xfrin/address Xfrin
-config set /Boss/components/b10-xfrin/kind dispensable
+config add /Init/components b10-xfrin
+config set /Init/components/b10-xfrin/address Xfrin
+config set /Init/components/b10-xfrin/kind dispensable
!echo adding Xfrout component
-config add /Boss/components b10-xfrout
-config set /Boss/components/b10-xfrout/address Xfrout
-config set /Boss/components/b10-xfrout/kind dispensable
+config add /Init/components b10-xfrout
+config set /Init/components/b10-xfrout/address Xfrout
+config set /Init/components/b10-xfrout/kind dispensable
!echo adding Zone Manager component
-config add /Boss/components b10-zonemgr
-config set /Boss/components/b10-zonemgr/address Zonemgr
-config set /Boss/components/b10-zonemgr/kind dispensable
+config add /Init/components b10-zonemgr
+config set /Init/components/b10-zonemgr/address Zonemgr
+config set /Init/components/b10-zonemgr/kind dispensable
!echo Components added. Please enter "config commit" to
!echo finalize initial setup and run the components.
</screen>
@@ -1770,7 +1766,7 @@ config set /Boss/components/b10-zonemgr/kind dispensable
<section id="bindctl_execute_notes">
<title>Notes on execute scripts</title>
Within scripts, you can add or remove modules with the normal
- configuration commands for <command>Boss/components</command>.
+ configuration commands for <command>Init/components</command>.
However, as module
configuration and commands do not show up until the module is
running, it is currently not possible to add a module and set
@@ -2091,7 +2087,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<para>
The BIND 10 suite may be shut down by stopping the
parent <command>bind10</command> process. This may be done
- by running the <userinput>Boss shutdown</userinput> command
+ by running the <userinput>Init shutdown</userinput> command
at the <command>bindctl</command> prompt.
</para>
</section>
@@ -2105,7 +2101,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
of the required <command>b10-sockcreator</command>,
<command>b10-msgq</command> and <command>b10-cfgmgr</command>
components.
- The configuration is in the <varname>Boss/components</varname>
+ The configuration is in the <varname>Init/components</varname>
section. Each element represents one component, which is
an abstraction of a process.
</para>
@@ -2113,10 +2109,10 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<para>
To add a process to the set, let's say the resolver (which
is not started by default), you would do this:
- <screen>> <userinput>config add Boss/components b10-resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
-> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+ <screen>> <userinput>config add Init/components b10-resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Init/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput></screen></para>
<para>
@@ -2146,7 +2142,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative DNS server</entry></row>
<row><entry>b10-resolver</entry><entry>resolver</entry><entry>DNS resolver</entry></row>
<row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>Command control (remote control interface)</entry></row>
- <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+ <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in b10-init before the release -->
</tbody>
</tgroup>
</table>
@@ -2230,9 +2226,9 @@ address, but the usual ones don't." mean? -->
You might want to do that to gain more performance (each one uses only
single core). Just put multiple entries under different names, like
this, with the same config:
- <screen>> <userinput>config add Boss/components b10-resolver-2</userinput>
-> <userinput>config set Boss/components/b10-resolver-2/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver-2/kind needed</userinput>
+ <screen>> <userinput>config add Init/components b10-resolver-2</userinput>
+> <userinput>config set Init/components/b10-resolver-2/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver-2/kind needed</userinput>
> <userinput>config commit</userinput></screen>
</para>
<para>
@@ -2248,7 +2244,7 @@ address, but the usual ones don't." mean? -->
<para>
The running processes started by <command>bind10</command>
- may be listed by running <userinput>Boss show_processes</userinput>
+ may be listed by running <userinput>Init show_processes</userinput>
using <command>bindctl</command>.
</para>
@@ -2420,7 +2416,7 @@ can use various data source backends.
<simpara>Stop the authoritative DNS server.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the BIND 10 init process may restart this service
if configured.)
</simpara>
</listitem>
@@ -3024,9 +3020,9 @@ what is XfroutClient xfr_client??
It can be done by using the <command>bindctl</command>
utility. For example:
<screen>
-> <userinput>config add Boss/components b10-ddns</userinput>
-> <userinput>config set Boss/components/b10-ddns/address DDNS</userinput>
-> <userinput>config set Boss/components/b10-ddns/kind dispensable</userinput>
+> <userinput>config add Init/components b10-ddns</userinput>
+> <userinput>config set Init/components/b10-ddns/address DDNS</userinput>
+> <userinput>config set Init/components/b10-ddns/kind dispensable</userinput>
> <userinput>config commit</userinput>
</screen>
<note><simpara>
@@ -3217,10 +3213,10 @@ what is XfroutClient xfr_client??
<command>bindctl</command>, for example:
<screen>
-> <userinput>config add Boss/components b10-resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
-> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config add Init/components b10-resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Init/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput>
</screen>
@@ -3340,7 +3336,7 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
both servers. DHCPv4-specific details are covered in <xref linkend="dhcp4"/>,
while those details specific to DHCPv6 are described in <xref linkend="dhcp6"/>
</para>
-
+
<section id="dhcp-install-configure">
<title>DHCP Database Installation and Configuration</title>
<para>
@@ -3363,7 +3359,7 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
Build and install BIND 10 as described in <xref linkend="installation"/>, with
the following modification: to enable the MySQL database code, at the
"configure" step (see <xref linkend="configure"/>), specify the location of the
- MySQL configuration program "mysql_config" with the "--with-mysql-config" switch,
+ MySQL configuration program "mysql_config" with the "--with-dhcp-mysql" switch,
i.e.
<screen><userinput>./configure [other-options] --with-dhcp-mysql</userinput></screen>
...if MySQL was installed in the default location, or:
@@ -3409,7 +3405,7 @@ $</screen>
</section>
</chapter>
-
+
<chapter id="dhcp4">
<title>The DHCPv4 Server</title>
@@ -3425,16 +3421,16 @@ $</screen>
After starting BIND 10 and entering bindctl, the first step
in configuring the server is to add it to the list of running BIND 10 services.
<screen>
-> <userinput>config add Boss/components b10-dhcp4</userinput>
-> <userinput>config set Boss/components/b10-dhcp4/kind dispensable</userinput>
+> <userinput>config add Init/components b10-dhcp4</userinput>
+> <userinput>config set Init/components/b10-dhcp4/kind dispensable</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
<para>
To remove <command>b10-dhcp4</command> from the set of running services,
- the <command>b10-dhcp4</command> is removed from list of Boss components:
+ the <command>b10-dhcp4</command> is removed from list of Init components:
<screen>
-> <userinput>config remove Boss/components b10-dhcp4</userinput>
+> <userinput>config remove Init/components b10-dhcp4</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
@@ -3502,7 +3498,7 @@ Dhcp4/subnet4 [] list (default)
The server comes with an in-memory database ("memfile") configured as the default
database. This is used for internal testing and is not supported. In addition,
it does not store lease information on disk: lease information will be lost if the
- server is restarted.
+ server is restarted.
</para>
</footnote>, and so the server must be configured to
access the correct database with the appropriate credentials.
@@ -3552,7 +3548,7 @@ Dhcp4/subnet4 [] list (default)
database. Improved password security will be added in a future version of BIND 10 DHCP</para>
</note>
</section>
-
+
<section id="dhcp4-address-config">
<title>Configuration of Address Pools</title>
<para>
@@ -3609,25 +3605,495 @@ Dhcp4/subnet4 [] list (default)
network configurations. If you want to avoid this, please use the "min-max" notation.
</para>
</section>
+
+ <section id="dhcp4-std-options">
+ <title>Standard DHCPv4 options</title>
+ <para>
+ One of the major features of DHCPv4 server is to provide configuration
+ options to clients. Although there are several options that require
+ special behavior, most options are sent by the server only if the client
+ explicitly requested them. The following example shows how to
+ configure DNS servers, which is one of the most frequently used
+ options. Options specified in this way are considered global and apply
+ to all configured subnets.
+
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.3.1, 192.0.3.2"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ </para>
+ <para>
+      The first line creates a new entry in the option-data table. It
+ contains information on all global options that the server is
+ supposed to configure in all subnets. The second line specifies
+ option name. For a complete list of currently supported names,
+ see <xref linkend="dhcp4-std-options-list"/> below.
+ The third line specifies option code, which must match one of the
+ values from that list. Line 4 specifies option space, which must always
+ be set to "dhcp4" as these are standard DHCPv4 options. For
+ other option spaces, including custom option spaces, see <xref
+ linkend="dhcp4-option-spaces"/>. The fifth line specifies the format in
+ which the data will be entered: use of CSV (comma
+ separated values) is recommended. The sixth line gives the actual value to
+ be sent to clients. Data is specified as a normal text, with
+ values separated by commas if more than one value is
+ allowed.
+ </para>
+
+ <para>
+ Options can also be configured as hexadecimal values. If csv-format is
+ set to false, option data must be specified as a hex string. The
+ following commands configure the domain-name-servers option for all
+ subnets with the following addresses: 192.0.3.1 and 192.0.3.2.
+ Note that csv-format is set to false.
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format false</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "C0 00 03 01 C0 00 03 02"</userinput>
+> <userinput>config commit</userinput>
+ </screen>
+ </para>
+
+ <para>
+ It is possible to override options on a per-subnet basis. If
+ clients connected to most of your subnets are expected to get the
+ same values of a given option, you should use global options: you
+ can then override specific values for a small number of subnets.
+ On the other hand, if you use different values in each subnet,
+ it does not make sense to specify global option values
+ (Dhcp4/option-data), rather you should set only subnet-specific values
+ (Dhcp4/subnet[X]/option-data[Y]).
+ </para>
+ <para>
+ The following commands override the global
+ DNS servers option for a particular subnet, setting a single DNS
+      server with address 192.0.2.3.
+ <screen>
+> <userinput>config add Dhcp4/subnet4[0]/option-data</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/data "192.0.2.3"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <note>
+ <para>In a future version of Kea, it will not be necessary to specify
+ the option code, space and csv-format fields as they will be set
+ automatically.</para>
+ </note>
+
+ <para>
+ Below is a list of currently supported standard DHCPv4 options. The "Name" and "Code"
+ are the values that should be used as a name in the option-data
+ structures. "Type" designates the format of the data: the meanings of
+ the various types is given in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ Some options are designated as arrays, which means that more than one
+ value is allowed in such an option. For example the option time-servers
+ allows the specification of more than one IPv4 address, so allowing
+      clients to obtain the addresses of multiple NTP servers.
+ </para>
+ <!-- @todo: describe record types -->
+
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp4-std-options-list">
+ <caption>List of standard DHCPv4 options</caption>
+ <thead>
+ <tr><th>Name</th><th>Code</th><th>Type</th><th>Array?</th></tr>
+ </thead>
+ <tbody>
+<tr><td>subnet-mask</td><td>1</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>time-offset</td><td>2</td><td>uint32</td><td>false</td></tr>
+<tr><td>routers</td><td>3</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>time-servers</td><td>4</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>name-servers</td><td>5</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>domain-name-servers</td><td>6</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>log-servers</td><td>7</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>cookie-servers</td><td>8</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>lpr-servers</td><td>9</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>impress-servers</td><td>10</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>resource-location-servers</td><td>11</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>host-name</td><td>12</td><td>string</td><td>false</td></tr>
+<tr><td>boot-size</td><td>13</td><td>uint16</td><td>false</td></tr>
+<tr><td>merit-dump</td><td>14</td><td>string</td><td>false</td></tr>
+<tr><td>domain-name</td><td>15</td><td>fqdn</td><td>false</td></tr>
+<tr><td>swap-server</td><td>16</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>root-path</td><td>17</td><td>string</td><td>false</td></tr>
+<tr><td>extensions-path</td><td>18</td><td>string</td><td>false</td></tr>
+<tr><td>ip-forwarding</td><td>19</td><td>boolean</td><td>false</td></tr>
+<tr><td>non-local-source-routing</td><td>20</td><td>boolean</td><td>false</td></tr>
+<tr><td>policy-filter</td><td>21</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>max-dgram-reassembly</td><td>22</td><td>uint16</td><td>false</td></tr>
+<tr><td>default-ip-ttl</td><td>23</td><td>uint8</td><td>false</td></tr>
+<tr><td>path-mtu-aging-timeout</td><td>24</td><td>uint32</td><td>false</td></tr>
+<tr><td>path-mtu-plateau-table</td><td>25</td><td>uint16</td><td>true</td></tr>
+<tr><td>interface-mtu</td><td>26</td><td>uint16</td><td>false</td></tr>
+<tr><td>all-subnets-local</td><td>27</td><td>boolean</td><td>false</td></tr>
+<tr><td>broadcast-address</td><td>28</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>perform-mask-discovery</td><td>29</td><td>boolean</td><td>false</td></tr>
+<tr><td>mask-supplier</td><td>30</td><td>boolean</td><td>false</td></tr>
+<tr><td>router-discovery</td><td>31</td><td>boolean</td><td>false</td></tr>
+<tr><td>router-solicitation-address</td><td>32</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>static-routes</td><td>33</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>trailer-encapsulation</td><td>34</td><td>boolean</td><td>false</td></tr>
+<tr><td>arp-cache-timeout</td><td>35</td><td>uint32</td><td>false</td></tr>
+<tr><td>ieee802-3-encapsulation</td><td>36</td><td>boolean</td><td>false</td></tr>
+<tr><td>default-tcp-ttl</td><td>37</td><td>uint8</td><td>false</td></tr>
+<tr><td>tcp-keepalive-internal</td><td>38</td><td>uint32</td><td>false</td></tr>
+<tr><td>tcp-keepalive-garbage</td><td>39</td><td>boolean</td><td>false</td></tr>
+<tr><td>nis-domain</td><td>40</td><td>string</td><td>false</td></tr>
+<tr><td>nis-servers</td><td>41</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>ntp-servers</td><td>42</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>vendor-encapsulated-options</td><td>43</td><td>empty</td><td>false</td></tr>
+<tr><td>netbios-name-servers</td><td>44</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>netbios-dd-server</td><td>45</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>netbios-node-type</td><td>46</td><td>uint8</td><td>false</td></tr>
+<tr><td>netbios-scope</td><td>47</td><td>string</td><td>false</td></tr>
+<tr><td>font-servers</td><td>48</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>x-display-manager</td><td>49</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>dhcp-requested-address</td><td>50</td><td>ipv4-address</td><td>false</td></tr>
+<!-- Lease time should not be configured by a user.
+<tr><td>dhcp-lease-time</td><td>51</td><td>uint32</td><td>false</td></tr>
+-->
+<tr><td>dhcp-option-overload</td><td>52</td><td>uint8</td><td>false</td></tr>
+<!-- Message Type, Server Identifier and Parameter Request List should not be configured by a user.
+<tr><td>dhcp-message-type</td><td>53</td><td>uint8</td><td>false</td></tr>
+<tr><td>dhcp-server-identifier</td><td>54</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>dhcp-parameter-request-list</td><td>55</td><td>uint8</td><td>true</td></tr>
+-->
+<tr><td>dhcp-message</td><td>56</td><td>string</td><td>false</td></tr>
+<tr><td>dhcp-max-message-size</td><td>57</td><td>uint16</td><td>false</td></tr>
+<!-- Renewal and rebinding time should not be configured by a user.
+<tr><td>dhcp-renewal-time</td><td>58</td><td>uint32</td><td>false</td></tr>
+<tr><td>dhcp-rebinding-time</td><td>59</td><td>uint32</td><td>false</td></tr>
+-->
+<tr><td>vendor-class-identifier</td><td>60</td><td>binary</td><td>false</td></tr>
+<!-- Client identifier should not be configured by a user.
+<tr><td>dhcp-client-identifier</td><td>61</td><td>binary</td><td>false</td></tr>
+-->
+<tr><td>nwip-domain-name</td><td>62</td><td>string</td><td>false</td></tr>
+<tr><td>nwip-suboptions</td><td>63</td><td>binary</td><td>false</td></tr>
+<tr><td>user-class</td><td>77</td><td>binary</td><td>false</td></tr>
+<tr><td>fqdn</td><td>81</td><td>record</td><td>false</td></tr>
+<tr><td>dhcp-agent-options</td><td>82</td><td>empty</td><td>false</td></tr>
+<tr><td>authenticate</td><td>90</td><td>binary</td><td>false</td></tr>
+<tr><td>client-last-transaction-time</td><td>91</td><td>uint32</td><td>false</td></tr>
+<tr><td>associated-ip</td><td>92</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>subnet-selection</td><td>118</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>domain-search</td><td>119</td><td>binary</td><td>false</td></tr>
+<tr><td>vivco-suboptions</td><td>124</td><td>binary</td><td>false</td></tr>
+<tr><td>vivso-suboptions</td><td>125</td><td>binary</td><td>false</td></tr>
+ </tbody>
+ </table>
+ </para>
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp-types">
+ <caption>List of standard DHCP option types</caption>
+ <thead>
+ <tr><th>Name</th><th>Meaning</th></tr>
+ </thead>
+ <tbody>
+ <tr><td>binary</td><td>An arbitrary string of bytes, specified as a set of hexadecimal digits.</td></tr>
+ <tr><td>boolean</td><td>Boolean value with allowed values true or false</td></tr>
+ <tr><td>empty</td><td>No value, data is carried in suboptions</td></tr>
+ <tr><td>fqdn</td><td>Fully qualified domain name (e.g. www.example.com)</td></tr>
+ <tr><td>ipv4-address</td><td>IPv4 address in the usual dotted-decimal notation (e.g. 192.0.2.1)</td></tr>
+ <tr><td>ipv6-address</td><td>IPv6 address in the usual colon notation (e.g. 2001:db8::1)</td></tr>
+ <tr><td>record</td><td>Structured data that may comprise any types (except "record" and "empty")</td></tr>
+ <tr><td>string</td><td>Any text</td></tr>
+ <tr><td>uint8</td><td>8 bit unsigned integer with allowed values 0 to 255</td></tr>
+          <tr><td>uint16</td><td>16 bit unsigned integer with allowed values 0 to 65535</td></tr>
+ <tr><td>uint32</td><td>32 bit unsigned integer with allowed values 0 to 4294967295</td></tr>
+ </tbody>
+ </table>
+ </para>
</section>
+ <section id="dhcp4-custom-options">
+ <title>Custom DHCPv4 options</title>
+ <para>It is also possible to define options other than the standard ones.
+ Assume that we want to define a new DHCPv4 option called "foo" which will have
+ code 222 and will convey a single unsigned 32 bit integer value. We can define
+ such an option by using the following commands:
+ <screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 222</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "uint32"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput></screen>
+ The "false" value of the "array" parameter determines that the option
+ does NOT comprise an array of "uint32" values but rather a single value.
+ Two other parameters have been left blank: "record-types" and "encapsulate".
+ The former specifies the comma separated list of option data fields if the
+ option comprises a record of data fields. The "record-fields" value should
+ be non-empty if the "type" is set to "record". Otherwise it must be left
+ blank. The latter parameter specifies the name of the option space being
+ encapsulated by the particular option. If the particular option does not
+ encapsulate any option space it should be left blank.
+        Note that the above set of commands defines the format of the new option and does not
+ set its values.
+ </para>
+ <note>
+ <para>
+ In the current release the default values are not propagated to the
+ parser when the new configuration is being set. Therefore, all
+ parameters must be specified at all times, even if their values are
+ left blank.
+ </para>
+ </note>
+
+ <para>Once the new option format is defined, its value is set
+ in the same way as for a standard option. For example the following
+ commands set a global value that applies to all subnets.
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 222</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>New options can take more complex forms than simple use of
+ primitives (uint8, string, ipv4-address etc): it is possible to
+ define an option comprising a number of existing primitives.
+ </para>
+ <para>Assume we
+ want to define a new option that will consist of an IPv4
+ address, followed by unsigned 16 bit integer, followed by a text
+ string. Such an option could be defined in the following way:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "bar"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 223</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types "ipv4-address, uint16, string"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+</screen>
+ The "type" is set to "record" to indicate that the option contains
+ multiple values of different types. These types are given as a comma-separated
+ list in the "record-types" field and should be those listed in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ The values of the option are set as follows:
+<screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "bar"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 223</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.100, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+        "csv-format" is set to "true" to indicate that the "data" field comprises a comma-separated
+ list of values. The values in the "data" must correspond to the types set in
+ the "record-types" field of the option definition.
+ </section>
+
+ <section id="dhcp4-vendor-opts">
+ <title>DHCPv4 vendor specific options</title>
+ <para>
+ Currently there are three option spaces defined: dhcp4 (to
+ be used in DHCPv4 daemon) and dhcp6 (for the DHCPv6 daemon); there
+ is also vendor-encapsulated-options-space, which is empty by default, but options
+ can be defined in it. Those options are called vendor-specific
+ information options. The following examples show how to define
+ an option "foo" with code 1 that consists of an IPv4 address, an
+ unsigned 16 bit integer and a string. The "foo" option is conveyed
+ in a vendor specific information option.
+ </para>
+ <para>
+ The first step is to define the format of the option:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "vendor-encapsulated-options-space"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types "ipv4-address, uint16, string"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ (Note that the option space is set to "vendor-encapsulated-options-space".)
+ Once the option format is defined, the next step is to define actual values
+ for that option:
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "vendor-encapsulated-options-space"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.3, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ We also set up a dummy value for vendor-opts, the option that conveys our sub-option "foo".
+ This is required else the option will not be included in messages sent to the client.
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[1]/name "vendor-encapsulated-options"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/code 43</userinput>
+> <userinput>config set Dhcp4/option-data[1]/csv-format false</userinput>
+> <userinput>config set Dhcp4/option-data[1]/data ""</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <note>
+ <para>
+ With this version of BIND 10, the "vendor-encapsulated-options" option
+ must be specified in the configuration although it has no configurable
+ parameters. If it is not specified, the server will assume that it is
+ not configured and will not send it to a client. In the future there
+ will be no need to include this option in the configuration.
+ </para>
+ </note>
+
+ </section>
+
+ <section id="dhcp4-option-spaces">
+
+ <title>Nested DHCPv4 options (custom option spaces)</title>
+      <para>It is sometimes useful to define a completely new option
+      space. This is the case when a user creates a new option in the
+      standard option space ("dhcp4" or "dhcp6") and wants this option
+ to convey sub-options. Thanks to being in the separate space,
+ sub-option codes will have a separate numbering scheme and may
+ overlap with codes of standard options.
+ </para>
+ <para>Note that creation of a new option space when defining
+ sub-options for a standard option is not required, because it is
+ created by default if the standard option is meant to convey any
+ sub-options (see <xref linkend="dhcp4-vendor-opts"/>).
+ </para>
+ <para>
+ Assume that we want to have a DHCPv4 option called "container" with
+ code 222 that conveys two sub-options with codes 1 and 2.
+ First we need to define the new sub-options:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "ipv4-address"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/code 2</userinput>
+> <userinput>config set Dhcp4/option-def[1]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/type "string"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[1]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[1]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Note that we have defined the options to belong to a new option space
+ (in this case, "isc").
+ </para>
+ <para>
+ The next step is to define a regular DHCPv4 option with our desired
+ code and specify that it should include options from the new option space:
+<screen>
+> <userinput>add Dhcp4/option-def</userinput>
+> <userinput>set Dhcp4/option-def[2]/name "container"</userinput>
+> <userinput>set Dhcp4/option-def[2]/code 222</userinput>
+> <userinput>set Dhcp4/option-def[2]/space "dhcp4"</userinput>
+> <userinput>set Dhcp4/option-def[2]/type "empty"</userinput>
+> <userinput>set Dhcp4/option-def[2]/array false</userinput>
+> <userinput>set Dhcp4/option-def[2]/record-types ""</userinput>
+> <userinput>set Dhcp4/option-def[2]/encapsulate "isc"</userinput>
+> <userinput>commit</userinput>
+</screen>
+ The name of the option space in which the sub-options are defined
+ is set in the "encapsulate" field. The "type" field is set to "empty"
+ to indicate that this option does not carry any data other than
+ sub-options.
+ </para>
+ <para>
+ Finally, we can set values for the new options:
+<screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.3"</userinput>
+> <userinput>config commit</userinput>
+<userinput></userinput>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/code 2</userinput>
+> <userinput>config set Dhcp4/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[1]/data "Hello world"</userinput>
+> <userinput>config commit</userinput>
+<userinput></userinput>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[2]/name "container"</userinput>
+> <userinput>config set Dhcp4/option-data[2]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[2]/code 222</userinput>
+> <userinput>config set Dhcp4/option-data[2]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[2]/data ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Even though the "container" option does not carry any data except
+        sub-options, the "data" field must be explicitly set to an empty value.
+ This is required because in the current version of BIND 10 DHCP, the
+ default configuration values are not propagated to the configuration parsers:
+ if the "data" is not set the parser will assume that this
+ parameter is not specified and an error will be reported.
+ </para>
+ <para>Note that it is possible to create an option which carries some data
+ in addition to the sub-options defined in the encapsulated option space. For example,
+ if the "container" option from the previous example was required to carry an uint16
+ value as well as the sub-options, the "type" value would have to be set to "uint16" in
+ the option definition. (Such an option would then have the following
+ data structure: DHCP header, uint16 value, sub-options.) The value specified
+ with the "data" parameter - which should be a valid integer enclosed in quotes,
+ e.g. "123" - would then be assigned to the uint16 field in the "container" option.
+ </para>
+ </section>
+ </section>
<section id="dhcp4-serverid">
<title>Server Identifier in DHCPv4</title>
- <para>The DHCPv4 protocol uses a "server identifier" for clients to be able
- to discriminate between several servers present on the same link: this
- value is an IPv4 address of the server. When started for the first time,
- the DHCPv4 server will choose one of its IPv4 addresses as its server-id,
- and store the chosen value to a file. (The file is named b10-dhcp4-serverid and is
- stored in the "local state directory". This is set during installation
- when "configure" is run, and can be changed by using "--localstatedir"
- on the "configure" command line.) That file will be read by the server
- and the contained value used whenever the server is subsequently started.
+ <para>
+ The DHCPv4 protocol uses a "server identifier" for clients to be able
+ to discriminate between several servers present on the same link: this
+ value is an IPv4 address of the server. When started for the first time,
+ the DHCPv4 server will choose one of its IPv4 addresses as its server-id,
+ and store the chosen value to a file. That file will be read by the server
+ and the contained value used whenever the server is subsequently started.
</para>
<para>
- It is unlikely that this parameter needs to be changed. If such a need
- arises, please stop the server, edit the file and restart the server.
- It is a text file that should contain an IPv4 address. Spaces are
- ignored. No extra characters are allowed in this file.
+ It is unlikely that this parameter should ever need to be changed.
+ However, if such a need arises, stop the server, edit the file and restart
+ the server. (The file is named b10-dhcp4-serverid and by default is
+ stored in the "var" subdirectory of the directory in which BIND 10 is installed.
+ This can be changed when BIND 10 is built by using "--localstatedir"
+ on the "configure" command line.) The file is a text file that should
+ contain an IPv4 address. Spaces are ignored, and no extra characters are allowed
+ in this file.
</para>
</section>
@@ -3705,7 +4171,9 @@ Dhcp4/renew-timer 1000 integer (default)
<simpara>Address rebinding (REBIND) and duplication report (DECLINE)
are not supported yet.</simpara>
</listitem>
-
+ <listitem>
+ <simpara>DNS Update is not yet supported.</simpara>
+ </listitem>
</itemizedlist>
</section>
@@ -3726,30 +4194,20 @@ Dhcp4/renew-timer 1000 integer (default)
After starting BIND 10 and starting <command>bindctl</command>, the first step
in configuring the server is to add <command>b10-dhcp6</command> to the list of running BIND 10 services.
<screen>
-> <userinput>config add Boss/components b10-dhcp6</userinput>
-> <userinput>config set Boss/components/b10-dhcp6/kind dispensable</userinput>
+> <userinput>config add Init/components b10-dhcp6</userinput>
+> <userinput>config set Init/components/b10-dhcp6/kind dispensable</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
<para>
To remove <command>b10-dhcp6</command> from the set of running services,
- the <command>b10-dhcp4</command> is removed from list of Boss components:
+ the <command>b10-dhcp4</command> is removed from list of Init components:
<screen>
-> <userinput>config remove Boss/components b10-dhcp6</userinput>
+> <userinput>config remove Init/components b10-dhcp6</userinput>
> <userinput>config commit</userinput>
</screen>
</para>
- <para>
- To change one of the parameters, simply follow
- the usual <command>bindctl</command> procedure. For example, to make the
- leases longer, change their valid-lifetime parameter:
- <screen>
-> <userinput>config set Dhcp6/valid-lifetime 7200</userinput>
-> <userinput>config commit</userinput></screen>
- Please note that most Dhcp6 parameters are of global scope
- and apply to all defined subnets, unless they are overridden on a
- per-subnet basis.
- </para>
+
<para>
During start-up the server will detect available network interfaces
@@ -3782,7 +4240,7 @@ Dhcp6/lease-database/name "" string (default)
Dhcp6/lease-database/user "" string (default)
Dhcp6/lease-database/host "" string (default)
Dhcp6/lease-database/password "" string (default)
-Dhcp6/subnet6/ list
+Dhcp6/subnet6/ list
</screen>
</para>
<para>
@@ -3813,7 +4271,7 @@ Dhcp6/subnet6/ list
The server comes with an in-memory database ("memfile") configured as the default
database. This is used for internal testing and is not supported. In addition,
it does not store lease information on disk: lease information will be lost if the
- server is restarted.
+ server is restarted.
</para>
</footnote>, and so the server must be configured to
access the correct database with the appropriate credentials.
@@ -3921,51 +4379,420 @@ Dhcp6/subnet6/ list
2001:db8:: address may be assigned as well. If you want to avoid this,
please use the "min-max" notation.
</para>
+ </section>
+
+ <section id="dhcp6-std-options">
+ <title>Standard DHCPv6 options</title>
<para>
- Options can also be configured: the following commands configure
- the DNS-SERVERS option for all subnets with the following addresses:
- 2001:db8:1::1 and 2001:db8:1::2
+      One of the major features of the DHCPv6 server is to provide configuration
+ options to clients. Although there are several options that require
+ special behavior, most options are sent by the server only if the client
+ explicitly requested them. The following example shows how to
+ configure DNS servers, which is one of the most frequently used
+ options. Numbers in the first column are added for easier reference and
+ will not appear on screen. Options specified in this way are considered
+ global and apply to all configured subnets.
+
<screen>
+1. > <userinput>config add Dhcp6/option-data</userinput>
+2. > <userinput>config set Dhcp6/option-data[0]/name "dns-servers"</userinput>
+3. > <userinput>config set Dhcp6/option-data[0]/code 23</userinput>
+4. > <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+5. > <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+6. > <userinput>config set Dhcp6/option-data[0]/data "2001:db8::cafe, 2001:db8::babe"</userinput>
+7. > <userinput>config commit</userinput>
+</screen>
+ </para>
+ <para>
+ The first line creates new entry in option-data table. It
+ contains information on all global options that the server is
+ supposed to configure in all subnets. The second line specifies
+ option name. For a complete list of currently supported names,
+ see <xref linkend="dhcp6-std-options-list"/> below.
+ The third line specifies option code, which must match one of the
+ values from that
+ list. Line 4 specifies option space, which must always
+ be set to "dhcp6" as these are standard DHCPv6 options. For
+ other name spaces, including custom option spaces, see <xref
+ linkend="dhcp6-option-spaces"/>. The fifth line specifies the format in
+ which the data will be entered: use of CSV (comma
+ separated values) is recommended. The sixth line gives the actual value to
+ be sent to clients. Data is specified as a normal text, with
+ values separated by commas if more than one value is
+ allowed.
+ </para>
+
+ <para>
+ Options can also be configured as hexadecimal values. If csv-format is
+ set to false, the option data must be specified as a string of hexadecimal
+ numbers. The
+ following commands configure the DNS-SERVERS option for all
+ subnets with the following addresses: 2001:db8:1::cafe and
+ 2001:db8:1::babe.
+ <screen>
> <userinput>config add Dhcp6/option-data</userinput>
> <userinput>config set Dhcp6/option-data[0]/name "dns-servers"</userinput>
> <userinput>config set Dhcp6/option-data[0]/code 23</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format false</userinput>
> <userinput>config set Dhcp6/option-data[0]/data "2001 0DB8 0001 0000 0000 0000</userinput>
- <userinput>0000 0001 2001 0DB8 0001 0000 0000 0000 0000 0002"</userinput>
+ <userinput>0000 CAFE 2001 0DB8 0001 0000 0000 0000 0000 BABE"</userinput>
> <userinput>config commit</userinput>
</screen>
(The value for the setting of the "data" element is split across two
- lines in this document for clarity: when entering the command, the whole
- string should be entered on the same line.)
+ lines in this document for clarity: when entering the command, the
+ whole string should be entered on the same line.)
</para>
- <para>
- Currently the only way to set option data is to specify the
- data as a string of hexadecimal digits. It is planned to allow
- alternative ways of specifying the data as a comma-separated list,
- e.g. "2001:db8:1::1,2001:db8:1::2".
- </para>
- <para>
- As with global settings, it is also possible to override options on a
- per-subnet basis, e.g. the following commands override the global DNS
- servers option for a particular subnet, setting a single DNS server with
- address 2001:db8:1::3.
- <screen>
+
+ <para>
+ It is possible to override options on a per-subnet basis. If
+ clients connected to most of your subnets are expected to get the
+ same values of a given option, you should use global options: you
+ can then override specific values for a small number of subnets.
+ On the other hand, if you use different values in each subnet,
+ it does not make sense to specify global option values
+ (Dhcp6/option-data), rather you should set only subnet-specific values
+ (Dhcp6/subnet[X]/option-data[Y]).
+ </para>
+ <para>
+ The following commands override the global
+ DNS servers option for a particular subnet, setting a single DNS
+ server with address 2001:db8:1::3.
+ <screen>
> <userinput>config add Dhcp6/subnet6[0]/option-data</userinput>
> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/name "dns-servers"</userinput>
> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/code 23</userinput>
-> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/data "2001 0DB8 0001 0000</userinput>
- <userinput>0000 0000 0000 0003"</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/data "2001:db8:1::3"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <note>
+ <para>
+ In future versions of BIND 10 DHCP, it will not be necessary to specify
+ option code, space and csv-format fields, as those fields will be set
+ automatically.
+ </para>
+ </note>
+
+
+ <para>
+ Below is a list of currently supported standard DHCPv6 options. The "Name" and "Code"
+ are the values that should be used as a name in the option-data
+ structures. "Type" designates the format of the data: the meanings of
+      the various types are given in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ Some options are designated as arrays, which means that more than one
+ value is allowed in such an option. For example the option dns-servers
+ allows the specification of more than one IPv6 address, so allowing
+      clients to obtain the addresses of multiple DNS servers.
+ </para>
+
+<!-- @todo: describe record types -->
+
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp6-std-options-list">
+ <caption>List of standard DHCPv6 options</caption>
+ <thead>
+ <tr><th>Name</th><th>Code</th><th>Type</th><th>Array?</th></tr>
+ <tr></tr>
+ </thead>
+ <tbody>
+<!-- Our engine uses those options on its own, admin must not configure them on his own
+<tr><td>clientid</td><td>1</td><td>binary</td><td>false</td></tr>
+<tr><td>serverid</td><td>2</td><td>binary</td><td>false</td></tr>
+<tr><td>ia-na</td><td>3</td><td>record</td><td>false</td></tr>
+<tr><td>ia-ta</td><td>4</td><td>uint32</td><td>false</td></tr>
+<tr><td>iaaddr</td><td>5</td><td>record</td><td>false</td></tr>
+<tr><td>oro</td><td>6</td><td>uint16</td><td>true</td></tr> -->
+<tr><td>preference</td><td>7</td><td>uint8</td><td>false</td></tr>
+
+<!-- Our engine uses those options on its own, admin must not configure them on his own
+<tr><td>elapsed-time</td><td>8</td><td>uint16</td><td>false</td></tr>
+<tr><td>relay-msg</td><td>9</td><td>binary</td><td>false</td></tr>
+<tr><td>auth</td><td>11</td><td>binary</td><td>false</td></tr>
+<tr><td>unicast</td><td>12</td><td>ipv6-address</td><td>false</td></tr>
+<tr><td>status-code</td><td>13</td><td>record</td><td>false</td></tr>
+<tr><td>rapid-commit</td><td>14</td><td>empty</td><td>false</td></tr>
+<tr><td>user-class</td><td>15</td><td>binary</td><td>false</td></tr>
+<tr><td>vendor-class</td><td>16</td><td>record</td><td>false</td></tr>
+<tr><td>vendor-opts</td><td>17</td><td>uint32</td><td>false</td></tr>
+<tr><td>interface-id</td><td>18</td><td>binary</td><td>false</td></tr>
+<tr><td>reconf-msg</td><td>19</td><td>uint8</td><td>false</td></tr>
+<tr><td>reconf-accept</td><td>20</td><td>empty</td><td>false</td></tr> -->
+<tr><td>sip-server-dns</td><td>21</td><td>fqdn</td><td>true</td></tr>
+<tr><td>sip-server-addr</td><td>22</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>dns-servers</td><td>23</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>domain-search</td><td>24</td><td>fqdn</td><td>true</td></tr>
+<!-- <tr><td>ia-pd</td><td>25</td><td>record</td><td>false</td></tr> -->
+<!-- <tr><td>iaprefix</td><td>26</td><td>record</td><td>false</td></tr> -->
+<tr><td>nis-servers</td><td>27</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>nisp-servers</td><td>28</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>nis-domain-name</td><td>29</td><td>fqdn</td><td>true</td></tr>
+<tr><td>nisp-domain-name</td><td>30</td><td>fqdn</td><td>true</td></tr>
+<tr><td>sntp-servers</td><td>31</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>information-refresh-time</td><td>32</td><td>uint32</td><td>false</td></tr>
+<tr><td>bcmcs-server-dns</td><td>33</td><td>fqdn</td><td>true</td></tr>
+<tr><td>bcmcs-server-addr</td><td>34</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>geoconf-civic</td><td>36</td><td>record</td><td>false</td></tr>
+<tr><td>remote-id</td><td>37</td><td>record</td><td>false</td></tr>
+<tr><td>subscriber-id</td><td>38</td><td>binary</td><td>false</td></tr>
+<tr><td>client-fqdn</td><td>39</td><td>record</td><td>false</td></tr>
+<tr><td>pana-agent</td><td>40</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>new-posix-timezone</td><td>41</td><td>string</td><td>false</td></tr>
+<tr><td>new-tzdb-timezone</td><td>42</td><td>string</td><td>false</td></tr>
+<tr><td>ero</td><td>43</td><td>uint16</td><td>true</td></tr>
+<tr><td>lq-query</td><td>44</td><td>record</td><td>false</td></tr>
+<tr><td>client-data</td><td>45</td><td>empty</td><td>false</td></tr>
+<tr><td>clt-time</td><td>46</td><td>uint32</td><td>false</td></tr>
+<tr><td>lq-relay-data</td><td>47</td><td>record</td><td>false</td></tr>
+<tr><td>lq-client-link</td><td>48</td><td>ipv6-address</td><td>true</td></tr>
+ </tbody>
+ </table>
+ </para>
+ </section>
+
+ <section id="dhcp6-custom-options">
+ <title>Custom DHCPv6 options</title>
+ <para>It is also possible to define options other than the standard ones.
+ Assume that we want to define a new DHCPv6 option called "foo" which will have
+ code 100 and will convey a single unsigned 32 bit integer value. We can define
+ such an option by using the following commands:
+ <screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 100</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "uint32"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
> <userinput>config commit</userinput></screen>
- (As before, the setting of the "data" element has been split across two
- lines for clarity.)
+ The "false" value of the "array" parameter determines that the option
+ does NOT comprise an array of "uint32" values but rather a single value.
+ Two other parameters have been left blank: "record-types" and "encapsulate".
+ The former specifies the comma separated list of option data fields if the
+    option comprises a record of data fields. The "record-types" value should
+ be non-empty if the "type" is set to "record". Otherwise it must be left
+ blank. The latter parameter specifies the name of the option space being
+ encapsulated by the particular option. If the particular option does not
+ encapsulate any option space it should be left blank.
+    Note that the above set of commands defines the format of the new option and does not
+ set its values.
</para>
+ <para>Once the new option format is defined, its value is set
+ in the same way as for a standard option. For example the following
+ commands set a global value that applies to all subnets.
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 100</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>New options can take more complex forms than simple use of
+ primitives (uint8, string, ipv6-address etc): it is possible to
+ define an option comprising a number of existing primitives.
+ </para>
+ <para>
+ Assume we
+ want to define a new option that will consist of an IPv6
+ address, followed by unsigned 16 bit integer, followed by a text
+ string. Such an option could be defined in the following way:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "bar"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 101</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types "ipv6-address, uint16, string"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+</screen>
+ The "type" is set to "record" to indicate that the option contains
+ multiple values of different types. These types are given as a comma-separated
+ list in the "record-types" field and should be those listed in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ The values of the option are set as follows:
+<screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "bar"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 101</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8:1::10, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+    "csv-format" is set to "true" to indicate that the "data" field comprises a comma-separated
+ list of values. The values in the "data" must correspond to the types set in
+ the "record-types" field of the option definition.
</section>
-
+
+ <section id="dhcp6-vendor-opts">
+ <title>DHCPv6 vendor specific options</title>
+ <para>
+ Currently there are three option spaces defined: dhcp4 (to be used
+ in DHCPv4 daemon) and dhcp6 (for the DHCPv6 daemon); there is also
+ vendor-opts-space, which is empty by default, but options can be
+ defined in it. Those options are called vendor-specific information
+ options. The following examples show how to define an option "foo"
+ with code 1 that consists of an IPv6 address, an unsigned 16 bit integer
+ and a string. The "foo" option is conveyed in a vendor specific
+ information option. This option comprises a single uint32 value
+ that is set to "12345". The sub-option "foo" follows the data
+ field holding this value.
+ <screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "vendor-opts-space"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types "ipv6-address, uint16, string"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ (Note that the option space is set to "vendor-opts-space".)
+ Once the option format is defined, the next step is to define actual values
+ for that option:
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "vendor-opts-space"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8:1::10, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ We should also define values for the vendor-opts, that will convey our option foo.
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[1]/name "vendor-opts"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/code 17</userinput>
+> <userinput>config set Dhcp6/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[1]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+ </section>
+
+ <section id="dhcp6-option-spaces">
+ <title>Nested DHCPv6 options (custom option spaces)</title>
+ <para>It is sometimes useful to define completely new option
+ spaces. This is useful if the user wants his new option to
+ convey sub-options that use separate numbering scheme, for
+ example sub-options with codes 1 and 2. Those option codes
+ conflict with standard DHCPv6 options, so a separate option
+ space must be defined.
+ </para>
+ <para>Note that it is not required to create new option space when
+ defining sub-options for a standard option because it is by
+ default created if the standard option is meant to convey
+ any sub-options (see <xref linkend="dhcp6-vendor-opts"/>).
+ </para>
+ <para>
+ Assume that we want to have a DHCPv6 option called "container"
+ with code 102 that conveys two sub-options with codes 1 and 2.
+ First we need to define the new sub-options:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "ipv6-address"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/code 2</userinput>
+> <userinput>config set Dhcp6/option-def[1]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/type "string"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[1]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[1]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Note that we have defined the options to belong to a new option space
+ (in this case, "isc").
+ </para>
+ <para>
+The next step is to define a regular DHCPv6 option and specify that it
+should include options from the isc option space:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[2]/name "container"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/code 102</userinput>
+> <userinput>config set Dhcp6/option-def[2]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/type "empty"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[2]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[2]/encapsulate "isc"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ The name of the option space in which the sub-options are defined
+ is set in the "encapsulate" field. The "type" field is set to "empty"
+ which imposes that this option does not carry any data other than
+ sub-options.
+ </para>
+ <para>
+ Finally, we can set values for the new options:
+<screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8::abcd"</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/code 2</userinput>
+> <userinput>config set Dhcp6/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[1]/data "Hello world"</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[2]/name "container"</userinput>
+> <userinput>config set Dhcp6/option-data[2]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[2]/code 102</userinput>
+> <userinput>config set Dhcp6/option-data[2]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[2]/data ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Even though the "container" option does not carry any data except
+    sub-options, the "data" field must be explicitly set to an empty value.
+ This is required because in the current version of BIND 10 DHCP, the
+ default configuration values are not propagated to the configuration parsers:
+ if the "data" is not set the parser will assume that this
+ parameter is not specified and an error will be reported.
+ </para>
+ <para>Note that it is possible to create an option which carries some data
+ in addition to the sub-options defined in the encapsulated option space. For example,
+ if the "container" option from the previous example was required to carry an uint16
+ value as well as the sub-options, the "type" value would have to be set to "uint16" in
+ the option definition. (Such an option would then have the following
+ data structure: DHCP header, uint16 value, sub-options.) The value specified
+ with the "data" parameter - which should be a valid integer enclosed in quotes,
+ e.g. "123" - would then be assigned to the uint16 field in the "container" option.
+ </para>
+ </section>
+
<section id="dhcp6-config-subnets">
<title>Subnet Selection</title>
<para>
- The DHCPv6 server may receive requests from local (connected to the same
- subnet as the server) and remote (connecting via relays)
- clients.
+ The DHCPv6 server may receive requests from local (connected
+ to the same subnet as the server) and remote (connecting via
+ relays) clients.
<note>
<para>
Currently relayed DHCPv6 traffic is not supported. The server will
@@ -3994,8 +4821,8 @@ Dhcp6/subnet6/ list
> <userinput>config commit</userinput>
</screen>
</para>
- </section>
-
+ </section>
+
</section>
<section id="dhcp6-serverid">
@@ -4003,28 +4830,30 @@ Dhcp6/subnet6/ list
<para>The DHCPv6 protocol uses a "server identifier" (also known
as a DUID) for clients to be able to discriminate between several
servers present on the same link. There are several types of
- DUIDs defined, but RFC 3315 instructs servers to use DUID-LLT if
+ DUIDs defined, but <ulink url="http://tools.ietf.org/html/rfc3315">RFC 3315</ulink> instructs servers to use DUID-LLT if
possible. This format consists of a link-layer (MAC) address and a
timestamp. When started for the first time, the DHCPv6 server will
automatically generate such a DUID and store the chosen value to
- a file (The file is named b10-dhcp6-serverid and is stored in the
- "local state directory". This is set during installation when
- "configure" is run, and can be changed by using "--localstatedir"
- on the "configure" command line.) That file will be read by the server
+ a file. That file is read by the server
and the contained value used whenever the server is subsequently started.
</para>
<para>
- It is unlikely that this parameter needs to be changed. If such a need
- arises, please stop the server, edit the file and start the server
- again. It is a text file that contains double digit hexadecimal values
+ It is unlikely that this parameter should ever need to be changed.
+ However, if such a need arises, stop the server, edit the file and restart
+ the server. (The file is named b10-dhcp6-serverid and by default is
+ stored in the "var" subdirectory of the directory in which BIND 10 is installed.
+ This can be changed when BIND 10 is built by using "--localstatedir"
+ on the "configure" command line.) The file is a text file that contains
+ double digit hexadecimal values
separated by colons. This format is similar to typical MAC address
format. Spaces are ignored. No extra characters are allowed in this
file.
</para>
+
</section>
<section id="dhcp6-std">
- <title>Supported DHCPv6 Standards</title>
+ <title>Supported Standards</title>
<para>The following standards and draft standards are currently
supported:</para>
<itemizedlist>
@@ -4164,7 +4993,7 @@ Dhcp6/renew-timer 1000 integer (default)
"queries.tcp": 1749,
"queries.udp": 867868
},
- "Boss": {
+ "Init": {
"boot_time": "2011-01-20T16:59:03Z"
},
"Stats": {
diff --git a/examples/configure.ac b/examples/configure.ac
index 37515d9..850e7ef 100644
--- a/examples/configure.ac
+++ b/examples/configure.ac
@@ -14,9 +14,10 @@ AC_LANG([C++])
# Checks for BIND 10 headers and libraries
AX_ISC_BIND10
-# We use -R, -rpath etc so the resulting program will be more likekly to
+# We use the -R option etc. so the resulting program will be more likely to
# "just work" by default. Embedding a specific library path is a controversial
-# practice, though; if you don't like it you can remove the following setting.
+# practice, though; if you don't like it you can remove the following setting,
+# or use the --disable-rpath option.
if test "x$BIND10_RPATH" != "x"; then
LDFLAGS="$LDFLAGS $BIND10_RPATH"
fi
diff --git a/examples/m4/ax_isc_rpath.m4 b/examples/m4/ax_isc_rpath.m4
index 91d9b8a..ee1e472 100644
--- a/examples/m4/ax_isc_rpath.m4
+++ b/examples/m4/ax_isc_rpath.m4
@@ -3,44 +3,54 @@ dnl
dnl @summary figure out whether and which "rpath" linker option is available
dnl
dnl This macro checks if the linker supports an option to embed a path
-dnl to a runtime library (often installed in an uncommon place), such as
-dnl gcc's -rpath option. If found, it sets the ISC_RPATH_FLAG variable to
+dnl to a runtime library (often installed in an uncommon place), such as the
+dnl commonly used -R option. If found, it sets the ISC_RPATH_FLAG variable to
dnl the found option flag. The main configure.ac can use it as follows:
dnl if test "x$ISC_RPATH_FLAG" != "x"; then
dnl LDFLAGS="$LDFLAGS ${ISC_RPATH_FLAG}/usr/local/lib/some_library"
dnl fi
+dnl
+dnl If you pass --disable-rpath to configure, ISC_RPATH_FLAG is not set
AC_DEFUN([AX_ISC_RPATH], [
-# We'll tweak both CXXFLAGS and CCFLAGS so this function will work whichever
-# language is used in the main script. Note also that it's not LDFLAGS;
-# technically this is a linker flag, but we've noticed $LDFLAGS can be placed
-# where the compiler could interpret it as a compiler option, leading to
-# subtle failure mode. So, in the check below using the compiler flag is
-# safer (in the actual Makefiles the flag should be set in LDFLAGS).
-CXXFLAGS_SAVED="$CXXFLAGS"
-CXXFLAGS="$CXXFLAGS -Wl,-R/usr/lib"
-CCFLAGS_SAVED="$CCFLAGS"
-CCFLAGS="$CCFLAGS -Wl,-R/usr/lib"
+AC_ARG_ENABLE(rpath,
+ [AC_HELP_STRING([--disable-rpath], [don't hardcode library path into binaries])],
+ rpath=$enableval, rpath=yes)
+
+if test x$rpath != xno; then
+ # We'll tweak both CXXFLAGS and CCFLAGS so this function will work
+ # whichever language is used in the main script. Note also that it's not
+  # LDFLAGS; technically this is a linker flag, but we've noticed $LDFLAGS
+ # can be placed where the compiler could interpret it as a compiler
+ # option, leading to subtle failure mode. So, in the check below using
+ # the compiler flag is safer (in the actual Makefiles the flag should be
+ # set in LDFLAGS).
+ CXXFLAGS_SAVED="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS -Wl,-R/usr/lib"
+ CCFLAGS_SAVED="$CCFLAGS"
+ CCFLAGS="$CCFLAGS -Wl,-R/usr/lib"
-# check -Wl,-R and -R rather than gcc specific -rpath to be as portable
-# as possible. -Wl,-R seems to be safer, so we try it first. In some cases
-# -R is not actually recognized but AC_TRY_LINK doesn't fail due to that.
-AC_MSG_CHECKING([whether -Wl,-R flag is available in linker])
-AC_TRY_LINK([],[],
- [ AC_MSG_RESULT(yes)
- ISC_RPATH_FLAG=-Wl,-R
- ],[ AC_MSG_RESULT(no)
- AC_MSG_CHECKING([whether -R flag is available in linker])
- CXXFLAGS="$CXXFLAGS_SAVED -R"
- CCFLAGS="$CCFLAGS_SAVED -R"
+ # check -Wl,-R and -R rather than gcc specific -rpath to be as portable
+ # as possible. -Wl,-R seems to be safer, so we try it first. In some
+ # cases -R is not actually recognized but AC_TRY_LINK doesn't fail due to
+ # that.
+ AC_MSG_CHECKING([whether -Wl,-R flag is available in linker])
+ AC_TRY_LINK([],[],
+ [ AC_MSG_RESULT(yes)
+ ISC_RPATH_FLAG=-Wl,-R
+ ],[ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING([whether -R flag is available in linker])
+ CXXFLAGS="$CXXFLAGS_SAVED -R"
+ CCFLAGS="$CCFLAGS_SAVED -R"
AC_TRY_LINK([], [],
[ AC_MSG_RESULT([yes; note that -R is more sensitive about the position in option arguments])
ISC_RPATH_FLAG=-R
],[ AC_MSG_RESULT(no) ])
- ])
+ ])
-CXXFLAGS=$CXXFLAGS_SAVED
-CCFLAGS=$CCFLAGS_SAVED
+ CXXFLAGS=$CXXFLAGS_SAVED
+ CCFLAGS=$CCFLAGS_SAVED
+fi
])dnl AX_ISC_RPATH
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index e8592ac..de8325b 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -106,7 +106,7 @@ public:
rollbackAddresses_ = old;
}
virtual void commit() {
- rollbackAddresses_.release();
+ rollbackAddresses_.reset();
}
private:
AuthSrv& server_;
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index d93da51..77b20b1 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -14,7 +14,7 @@
$NAMESPACE isc::auth
-% AUTH_AXFR_ERROR error handling AXFR request: %1
+% AUTH_AXFR_PROBLEM error handling AXFR request: %1
This is a debug message produced by the authoritative server when it
has encountered an error processing an AXFR request. The message gives
the reason for the error, and the server will return a SERVFAIL code to
@@ -232,13 +232,13 @@ This is a debug message produced by the authoritative server when it receives
a NOTIFY packet but the XFRIN process is not running. The packet will be
dropped and nothing returned to the sender.
-% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+% AUTH_PACKET_PARSE_FAILED unable to parse received DNS packet: %1
This is a debug message, generated by the authoritative server when an
attempt to parse a received DNS packet has failed due to something other
than a protocol error. The reason for the failure is given in the message;
the server will return a SERVFAIL error code to the sender.
-% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+% AUTH_PACKET_PROTOCOL_FAILURE DNS packet protocol error: %1. Returning %2
This is a debug message, generated by the authoritative server when an
attempt to parse a received DNS packet has failed due to a protocol error.
The reason for the failure is given in the message, as is the error code
@@ -312,6 +312,9 @@ been created and is initializing. The AUTH_SERVER_STARTED message will be
output when initialization has successfully completed and the server starts
accepting queries.
+% AUTH_SERVER_EXITING exiting
+The authoritative server is exiting.
+
% AUTH_SERVER_FAILED server failed: %1
The authoritative server has encountered a fatal error and is terminating. The
reason for the failure is included in the message.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 26a8489..ca323e0 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -526,13 +526,13 @@ AuthSrv::processMessage(const IOMessage& io_message, Message& message,
// Parse the message.
message.fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_FAILURE)
.arg(error.getRcode().toText()).arg(error.what());
makeErrorMessage(impl_->renderer_, message, buffer, error.getRcode());
impl_->resumeServer(server, message, stats_attrs, true);
return;
} catch (const Exception& ex) {
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_FAILED)
.arg(ex.what());
makeErrorMessage(impl_->renderer_, message, buffer, Rcode::SERVFAIL());
impl_->resumeServer(server, message, stats_attrs, true);
@@ -725,7 +725,7 @@ AuthSrvImpl::processXfrQuery(const IOMessage& io_message, Message& message,
xfrout_connected_ = false;
}
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_PROBLEM)
.arg(err.what());
makeErrorMessage(renderer_, message, buffer, Rcode::SERVFAIL(),
tsig_context);
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 88f80e2..08a2fde 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -53,8 +53,8 @@
<para>The <command>b10-auth</command> daemon provides the BIND 10
authoritative DNS server.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -194,7 +194,7 @@
<command>shutdown</command> exits <command>b10-auth</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index e90d199..dc03be2 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,6 +44,7 @@
#include <server_common/socket_request.h>
#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
#include <sys/types.h>
#include <sys/socket.h>
@@ -152,10 +153,11 @@ main(int argc, char* argv[]) {
int ret = 0;
// XXX: we should eventually pass io_service here.
- Session* cc_session = NULL;
- Session* xfrin_session = NULL;
+ boost::scoped_ptr<AuthSrv> auth_server_; // placeholder
+ boost::scoped_ptr<Session> cc_session;
+ boost::scoped_ptr<Session> xfrin_session;
bool xfrin_session_established = false; // XXX (see Trac #287)
- ModuleCCSession* config_session = NULL;
+ boost::scoped_ptr<ModuleCCSession> config_session;
XfroutClient xfrout_client(getXfroutSocketPath());
SocketSessionForwarder ddns_forwarder(getDDNSSocketPath());
try {
@@ -167,7 +169,8 @@ main(int argc, char* argv[]) {
specfile = string(AUTH_SPECFILE_LOCATION);
}
- auth_server = new AuthSrv(xfrout_client, ddns_forwarder);
+ auth_server_.reset(new AuthSrv(xfrout_client, ddns_forwarder));
+ auth_server = auth_server_.get();
LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
SimpleCallback* checkin = auth_server->getCheckinProvider();
@@ -179,7 +182,7 @@ main(int argc, char* argv[]) {
auth_server->setDNSService(dns_service);
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
- cc_session = new Session(io_service.get_io_service());
+ cc_session.reset(new Session(io_service.get_io_service()));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
// Initialize the Socket Requestor
isc::server_common::initSocketRequestor(*cc_session, AUTH_NAME);
@@ -187,22 +190,22 @@ main(int argc, char* argv[]) {
// We delay starting listening to new commands/config just before we
// go into the main loop to avoid confusion due to mixture of
// synchronous and asynchronous operations (this would happen in
- // initial communication with the boss that takes place in
+ // initial communication with b10-init that takes place in
// updateConfig() for listen_on and in initializing TSIG keys below).
// Until then all operations on the CC session will take place
// synchronously.
- config_session = new ModuleCCSession(specfile, *cc_session,
- my_config_handler,
- my_command_handler, false);
+ config_session.reset(new ModuleCCSession(specfile, *cc_session,
+ my_config_handler,
+ my_command_handler, false));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
- xfrin_session = new Session(io_service.get_io_service());
+ xfrin_session.reset(new Session(io_service.get_io_service()));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
xfrin_session->establish(NULL);
xfrin_session_established = true;
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
- auth_server->setXfrinSession(xfrin_session);
+ auth_server->setXfrinSession(xfrin_session.get());
// Configure the server. configureAuthServer() is expected to install
// all initial configurations, but as a short term workaround we
@@ -210,7 +213,7 @@ main(int argc, char* argv[]) {
// updateConfig().
// if server load configure failed, we won't exit, give user second
// chance to correct the configure.
- auth_server->setConfigSession(config_session);
+ auth_server->setConfigSession(config_session.get());
try {
configureAuthServer(*auth_server, config_session->getFullConfig());
auth_server->updateConfig(ElementPtr());
@@ -228,7 +231,7 @@ main(int argc, char* argv[]) {
config_session->addRemoteConfig("data_sources",
boost::bind(datasrcConfigHandler,
auth_server, &first_time,
- config_session,
+ config_session.get(),
_1, _2, _3),
false);
@@ -260,10 +263,7 @@ main(int argc, char* argv[]) {
config_session->removeRemoteConfig("data_sources");
}
- delete xfrin_session;
- delete config_session;
- delete cc_session;
- delete auth_server;
+ LOG_INFO(auth_logger, AUTH_SERVER_EXITING);
return (ret);
}
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 830de0d..05c6cce 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -130,7 +130,7 @@ TEST_F(AuthConfigTest, invalidListenAddressConfig) {
isc::testutils::portconfig::invalidListenAddressConfig(server);
}
-// Try setting addresses trough config
+// Try setting addresses through config
TEST_F(AuthConfigTest, listenAddressConfig) {
isc::testutils::portconfig::listenAddressConfig(server);
diff --git a/src/bin/bind10/.gitignore b/src/bin/bind10/.gitignore
index 2cf6b50..ce6632d 100644
--- a/src/bin/bind10/.gitignore
+++ b/src/bin/bind10/.gitignore
@@ -1,4 +1,3 @@
-/bind10
-/bind10_src.py
+/b10-init.py
/run_bind10.sh
/bind10.8
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 86c6595..728fc4a 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,29 +1,33 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10_src.pyc
-CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
-CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
+pkglibexec_SCRIPTS = b10-init
+CLEANFILES = b10-init b10-init.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
-nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
pylogmessagedir = $(pyexecdir)/isc/log_messages/
noinst_SCRIPTS = run_bind10.sh
bind10dir = $(pkgdatadir)
-bind10_DATA = bob.spec
-EXTRA_DIST = bob.spec
+bind10_DATA = init.spec
+EXTRA_DIST = init.spec bind10.in
-man_MANS = bind10.8
-DISTCLEANFILES = $(man_MANS)
-EXTRA_DIST += $(man_MANS) bind10.xml bind10_messages.mes
+man_MANS = b10-init.8 bind10.8
+DISTCLEANFILES = $(man_MANS) bind10
+EXTRA_DIST += $(man_MANS) b10-init.xml bind10.xml init_messages.mes
if GENERATE_DOCS
bind10.8: bind10.xml
- @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/bind10.xml
+ @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/bind10.xml
+
+b10-init.8: b10-init.xml
+ @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-init.xml
#dist-local-check-mans-enabled:
# @if grep "Man generation disabled" $(man_MANS) >/dev/null; then $(RM) $(man_MANS); fi
@@ -40,15 +44,15 @@ $(man_MANS):
endif
-$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py : init_messages.mes
$(top_builddir)/src/lib/log/compiler/message \
- -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/init_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+b10-init: init.py $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LIBDIR@@|$(libdir)|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" init.py >$@
chmod a+x $@
pytest:
diff --git a/src/bin/bind10/README b/src/bin/bind10/README
index e1d2d89..d75c0cd 100644
--- a/src/bin/bind10/README
+++ b/src/bin/bind10/README
@@ -1,11 +1,12 @@
-This directory contains the source for the "Boss of Bind" program.
+This directory contains the source for the "b10-init" program, as well as
+the "bind10" script that runs it.
Files:
Makefile.am - build information
README - this file
TODO - remaining development tasks for this program
bind10.py.in - used to make bind10.py with proper Python paths
- bob.spec - defines the options and commands
+ init.spec - defines the options and commands
run_bind10.sh.in - used to make run_bind10.sh with proper Python paths
The "tests" directory contains unit tests for the application.
diff --git a/src/bin/bind10/b10-init.xml b/src/bin/bind10/b10-init.xml
new file mode 100644
index 0000000..f166683
--- /dev/null
+++ b/src/bin/bind10/b10-init.xml
@@ -0,0 +1,513 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+ [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+ <refentryinfo>
+ <date>February 5, 2013</date>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>b10-init</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo>BIND10</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>b10-init</refname>
+ <refpurpose>BIND 10 Init process</refpurpose>
+ </refnamediv>
+
+ <docinfo>
+ <copyright>
+ <year>2010-2013</year>
+ <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+ </copyright>
+ </docinfo>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>b10-init</command>
+ <arg><option>-c <replaceable>config-filename</replaceable></option></arg>
+ <arg><option>-i</option></arg>
+ <arg><option>-m <replaceable>file</replaceable></option></arg>
+ <arg><option>-p <replaceable>data_path</replaceable></option></arg>
+ <arg><option>-u <replaceable>user</replaceable></option></arg>
+ <arg><option>-v</option></arg>
+ <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
+ <arg><option>--clear-config</option></arg>
+ <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
+ <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
+ <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
+ <arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
+ <arg><option>--no-kill</option></arg>
+ <arg><option>--pid-file</option> <replaceable>filename</replaceable></arg>
+ <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
+ <arg><option>--user <replaceable>user</replaceable></option></arg>
+ <arg><option>--verbose</option></arg>
+ <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>The <command>b10-init</command> daemon starts up other
+ BIND 10 required daemons. It handles restarting of exiting
+ programs and also the shutdown of all managed daemons.</para>
+
+<!-- TODO: list what it starts here? -->
+
+<!-- TODO
+ <para>The configuration of the <command>b10-init</command> daemon
+ is defined in the TODO configuration file, as described in the
+ <citerefentry><refentrytitle>TODO</refentrytitle><manvolnum>5</manvolnum></citerefentry>
+ manual page.</para>
+-->
+
+ </refsect1>
+
+ <refsect1>
+ <title>ARGUMENTS</title>
+
+ <para>The arguments are as follows:</para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>
+ <option>-c</option> <replaceable>config-filename</replaceable>,
+ <option>--config-file</option> <replaceable>config-filename</replaceable>
+ </term>
+ <listitem>
+ <para>The configuration filename to use. Can be either absolute or
+ relative to data path. In case it is absolute, value of data path is
+ not considered.
+ Defaults to <filename>b10-config.db</filename>.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--clear-config</option>
+ </term>
+ <listitem>
+ <para>
+ This will create a backup of the existing configuration
+ file, remove it and start
+ <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
+ with the default configuration.
+ The name of the backup file can be found in the logs
+ (<varname>CFGMGR_BACKED_UP_CONFIG_FILE</varname>).
+ (It will append a number to the backup filename if a
+ previous backup file exists.)
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--cmdctl-port</option> <replaceable>port</replaceable>
+ </term>
+ <listitem>
+ <para>The <command>b10-cmdctl</command> daemon will listen
+ on this port.
+ (See
+ <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
+ for the default.)
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>-p</option> <replaceable>directory</replaceable>,
+ <option>--data-path</option> <replaceable>directory</replaceable>
+ </term>
+ <listitem>
+ <para>The path where BIND 10 programs look for various data files.
+ Currently only
+ <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ uses it to locate the configuration file, but the usage
+ might be extended for other programs and other types of
+ files.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-m</option> <replaceable>file</replaceable>,
+ <option>--msgq-socket-file</option> <replaceable>file</replaceable></term>
+
+ <listitem>
+ <para>The UNIX domain socket file for the
+ <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ daemon to use.
+ The default is
+ <filename>/usr/local/var/bind10/msg_socket</filename>.
+<!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-i</option>, <option>--no-kill</option></term>
+ <listitem>
+ <para>When this option is passed, <command>b10-init</command>
+ does not send SIGTERM and SIGKILL signals to modules during
+ shutdown. (This option was introduced for use during
+ testing.)</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
+<!-- TODO: example more detail. -->
+ <listitem>
+ <para>The username for <command>b10-init</command> to run as.
+ <command>b10-init</command> must initially be run as the
+ root user to use this option.
+ The default is to run as the current user.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>--pid-file</option> <replaceable>filename</replaceable></term>
+ <listitem>
+ <para>If defined, the PID of the <command>b10-init</command> is stored
+ in this file.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>--pretty-name <replaceable>name</replaceable></option></term>
+
+ <listitem>
+ <para>The name this process should have in tools like
+ <command>ps</command> or <command>top</command>. This
+ is handy if you have multiple versions/installations
+ of <command>b10-init</command>.
+<!-- TODO: only supported with setproctitle feature
+The default is the basename of ARG 0.
+-->
+</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-v</option>, <option>--verbose</option></term>
+ <listitem>
+ <para>Display more about what is going on for
+ <command>b10-init</command> and its child processes.</para>
+<!-- TODO: not true about all children yet -->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
+ <listitem>
+ <para>Sets the amount of time that BIND 10 will wait for
+ the configuration manager (a key component of BIND 10)
+ to initialize itself before abandoning the start up and
+ terminating with an error. The
+ <replaceable>wait_time</replaceable> is specified in
+ seconds and has a default value of 10.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect1>
+
+<!--
+TODO: configuration section
+-->
+
+ <refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The configuration provides settings for components for
+ <command>b10-init</command> to manage under
+ <varname>/Init/components/</varname>.
+ The default elements are:
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <para> <varname>/Init/components/b10-cmdctl</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Init/components/b10-stats</varname> </para>
+ </listitem>
+
+ </itemizedlist>
+
+ <para>
+ (Note that the startup of <command>b10-sockcreator</command>,
+ <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+ is not configurable. They are hardcoded and <command>b10-init</command>
+ will not run without them.)
+ </para>
+
+ <para>
+ The named sets for components contain the following settings:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><varname>address</varname></term>
+ <listitem>
+ <para>The name used for communicating to it on the message
+ bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><varname>kind</varname></term>
+ <listitem>
+ <para>
+ This defines how required a component is.
+ The possible settings for <varname>kind</varname> are:
+ <varname>core</varname> (the system won't start if this
+ component fails to start, and <command>b10-init</command> will
+ shut down if a <quote>core</quote> component crashes),
+ <varname>dispensable</varname> (<command>b10-init</command>
+ will restart failing component),
+ and
+ <varname>needed</varname> (<command>b10-init</command>
+ will shutdown if component won't initially start, but
+ if it crashes later, it will attempt to restart).
+ This setting is required.
+<!-- TODO: formatting -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+<!--
+TODO: currently not used
+ <varlistentry>
+ <term> <varname>params</varname> </term>
+ <listitem>
+ <para>
+list
+</para>
+ </listitem>
+ </varlistentry>
+-->
+
+ <varlistentry>
+ <term> <varname>priority</varname> </term>
+ <listitem>
+ <para>This is an integer. <command>b10-init</command>
+ will start the components with largest priority numbers first.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>process</varname> </term>
+ <listitem>
+ <para>This is the filename of the executable to be started.
+ If not defined, then <command>b10-init</command> will
+ use the component name instead.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>special</varname> </term>
+ <listitem>
+ <para>
+ This defines if the component is started a special, hardcoded
+ way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+sockcreator
+xfrin
+-->
+
+</para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+<!-- TODO: formatting -->
+ <para>
+ The <varname>Init</varname> configuration commands are:
+ </para>
+
+<!-- TODO -->
+<!--
+ <para>
+ <command>drop_socket</command>
+ This is an internal command and not exposed to the administrator.
+ </para>
+-->
+
+<!-- TODO -->
+<!--
+ <para>
+ <command>get_socket</command>
+ This is an internal command and not exposed to the administrator.
+ </para>
+-->
+
+ <para>
+ <command>getstats</command> tells <command>b10-init</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon.
+ This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+ </para>
+
+ <para>
+ <command>ping</command> is used to check the connection with the
+ <command>b10-init</command> daemon.
+ It returns the text <quote>pong</quote>.
+ </para>
+
+ <para>
+ <command>show_processes</command> lists the current processes
+ managed by <command>b10-init</command>.
+ The output is an array in JSON format containing the process
+ ID, the name for each and the address name used on each message bus.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+ </para>
+
+ <para>
+ <command>shutdown</command> tells <command>b10-init</command>
+ to shut down the BIND 10 servers.
+ It will tell each process it manages to shut down and, when
+ complete, <command>b10-init</command> will exit.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon for <quote>Init</quote> include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>boot_time</term>
+ <listitem><para>
+ The date and time that the <command>b10-init</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
+ <title>FILES</title>
+ <para><filename>sockcreator-XXXXXX/sockcreator</filename>
+ —
+ the Unix Domain socket located in a temporary file directory for
+ <command>b10-sockcreator</command>
+<!-- <citerefentry><refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum></citerefentry> -->
+ communication.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>SEE ALSO</title>
+ <para>
+ <citerefentry>
+ <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-zonemgr</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citetitle>BIND 10 Guide</citetitle>.
+ </para>
+ </refsect1>
+<!-- <citerefentry>
+ <refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>, -->
+
+ <refsect1 id='history'><title>HISTORY</title>
+ <para>The development of <command>b10-init</command>
+ was started in October 2009.
+ It was renamed and its configuration identifier changed
+ in February 2013.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>AUTHORS</title>
+ <para>
+ The <command>b10-init</command>
+ daemon was initially designed by Shane Kerr of ISC.
+ </para>
+ </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->
diff --git a/src/bin/bind10/bind10.in b/src/bin/bind10/bind10.in
new file mode 100755
index 0000000..88c45c9
--- /dev/null
+++ b/src/bin/bind10/bind10.in
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# We use this wrapper script both for production and in-source tests; in
+# the latter case B10_FROM_BUILD environment is expected to be defined.
+if test -n "${B10_FROM_BUILD}"; then
+ exec "${B10_FROM_BUILD}/src/bin/bind10/b10-init" "$@"
+else
+ prefix=@prefix@
+ exec_prefix=@exec_prefix@
+ exec "@libexecdir@/@PACKAGE@/b10-init" "$@"
+fi
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index e32544a..16082f3 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>April 12, 2012</date>
+ <date>February 5, 2013</date>
</refentryinfo>
<refmeta>
@@ -31,12 +31,12 @@
<refnamediv>
<refname>bind10</refname>
- <refpurpose>BIND 10 boss process</refpurpose>
+ <refpurpose>BIND 10 start script</refpurpose>
</refnamediv>
<docinfo>
<copyright>
- <year>2010-2012</year>
+ <year>2013</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -44,468 +44,33 @@
<refsynopsisdiv>
<cmdsynopsis>
<command>bind10</command>
- <arg><option>-c <replaceable>config-filename</replaceable></option></arg>
- <arg><option>-i</option></arg>
- <arg><option>-m <replaceable>file</replaceable></option></arg>
- <arg><option>-p <replaceable>data_path</replaceable></option></arg>
- <arg><option>-u <replaceable>user</replaceable></option></arg>
- <arg><option>-v</option></arg>
- <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
- <arg><option>--clear-config</option></arg>
- <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
- <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
- <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
- <arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
- <arg><option>--no-kill</option></arg>
- <arg><option>--pid-file</option> <replaceable>filename</replaceable></arg>
- <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
- <arg><option>--user <replaceable>user</replaceable></option></arg>
- <arg><option>--verbose</option></arg>
- <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
+ <arg><option>options</option></arg>
</cmdsynopsis>
</refsynopsisdiv>
<refsect1>
<title>DESCRIPTION</title>
- <para>The <command>bind10</command> daemon starts up other
- BIND 10 required daemons. It handles restarting of exiting
- programs and also the shutdown of all managed daemons.</para>
+ <para>The <command>bind10</command> script is a simple wrapper that
+ starts BIND 10 by running the <command>b10-init</command> daemon. All
+ options passed to <command>bind10</command> are directly passed on to
+ <command>b10-init</command>.</para>
-<!-- TODO: list what it starts here? -->
-
-<!-- TODO
- <para>The configuration of the <command>bind10</command> daemon
- is defined in the TODO configuration file, as described in the
- <citerefentry><refentrytitle>TODO</refentrytitle><manvolnum>5</manvolnum></citerefentry>
- manual page.</para>
--->
-
- </refsect1>
-
- <refsect1>
- <title>ARGUMENTS</title>
-
- <para>The arguments are as follows:</para>
-
- <variablelist>
-
- <varlistentry>
- <term>
- <option>-c</option> <replaceable>config-filename</replaceable>,
- <option>--config-file</option> <replaceable>config-filename</replaceable>
- </term>
- <listitem>
- <para>The configuration filename to use. Can be either absolute or
- relative to data path. In case it is absolute, value of data path is
- not considered.
- Defaults to <filename>b10-config.db</filename>.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>--clear-config</option>
- </term>
- <listitem>
- <para>
- This will create a backup of the existing configuration
- file, remove it and start
- <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
- with the default configuration.
- The name of the backup file can be found in the logs
- (<varname>CFGMGR_BACKED_UP_CONFIG_FILE</varname>).
- (It will append a number to the backup filename if a
- previous backup file exists.)
-
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>--cmdctl-port</option> <replaceable>port</replaceable>
- </term>
- <listitem>
- <para>The <command>b10-cmdctl</command> daemon will listen
- on this port.
- (See
- <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
- for the default.)
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>-p</option> <replaceable>directory</replaceable>,
- <option>--data-path</option> <replaceable>directory</replaceable>
- </term>
- <listitem>
- <para>The path where BIND 10 programs look for various data files.
- Currently only
- <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- uses it to locate the configuration file, but the usage
- might be extended for other programs and other types of
- files.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-m</option> <replaceable>file</replaceable>,
- <option>--msgq-socket-file</option> <replaceable>file</replaceable></term>
-
- <listitem>
- <para>The UNIX domain socket file for the
- <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- daemon to use.
- The default is
- <filename>/usr/local/var/bind10/msg_socket</filename>.
-<!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-i</option>, <option>--no-kill</option></term>
- <listitem>
- <para>When this option is passed, <command>bind10</command>
- does not send SIGTERM and SIGKILL signals to modules during
- shutdown. (This option was introduced for use during
- testing.)</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
-<!-- TODO: example more detail. -->
- <listitem>
- <para>The username for <command>bind10</command> to run as.
- <command>bind10</command> must be initially run as the
- root user to use this option.
- The default is to run as the current user.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--pid-file</option> <replaceable>filename</replaceable></term>
- <listitem>
- <para>If defined, the PID of the <command>bind10</command> is stored
- in this file.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--pretty-name <replaceable>name</replaceable></option></term>
-
- <listitem>
- <para>The name this process should have in tools like
- <command>ps</command> or <command>top</command>. This
- is handy if you have multiple versions/installations
- of <command>bind10</command>.
-<!-- TODO: only supported with setproctitle feature
-The default is the basename of ARG 0.
--->
-</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-v</option>, <option>--verbose</option></term>
- <listitem>
- <para>Display more about what is going on for
- <command>bind10</command> and its child processes.</para>
-<!-- TODO: not true about all children yet -->
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
- <listitem>
- <para>Sets the amount of time that BIND 10 will wait for
- the configuration manager (a key component of BIND 10)
- to initialize itself before abandoning the start up and
- terminating with an error. The
- <replaceable>wait_time</replaceable> is specified in
- seconds and has a default value of 10.
- </para>
- </listitem>
- </varlistentry>
-
- </variablelist>
- </refsect1>
-
-<!--
-TODO: configuration section
--->
-
- <refsect1>
- <title>CONFIGURATION AND COMMANDS</title>
-
- <para>
- The configuration provides settings for components for
- <command>bind10</command> to manage under
- <varname>/Boss/components/</varname>.
- The default elements are:
- </para>
-
- <itemizedlist>
-
- <listitem>
- <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
- </listitem>
-
- <listitem>
- <para> <varname>/Boss/components/b10-stats</varname> </para>
- </listitem>
-
- </itemizedlist>
-
- <para>
- (Note that the startup of <command>b10-sockcreator</command>,
- <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
- is not configurable. They are hardcoded and <command>bind10</command>
- will not run without them.)
- </para>
-
- <para>
- The named sets for components contain the following settings:
- </para>
-
- <variablelist>
-
- <varlistentry>
- <term><varname>address</varname></term>
- <listitem>
- <para>The name used for communicating to it on the message
- bus.</para>
-<!-- NOTE: vorner said:
-These can be null, because the components are special ones, and
-the special class there already knows the address. It is (I hope)
-explained in the guide. I'd like to get rid of the special components
-sometime and I'd like it to teach to guess the address.
--->
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><varname>kind</varname></term>
- <listitem>
- <para>
- This defines how required a component is.
- The possible settings for <varname>kind</varname> are:
- <varname>core</varname> (system won't start if it won't
- start and <command>bind10</command> will shutdown if
- a <quote>core</quote> component crashes),
- <varname>dispensable</varname> (<command>bind10</command>
- will restart failing component),
- and
- <varname>needed</varname> (<command>bind10</command>
- will shutdown if component won't initially start, but
- if crashes later, it will attempt to restart).
- This setting is required.
-<!-- TODO: formatting -->
- </para>
- </listitem>
- </varlistentry>
-
-<!--
-TODO: currently not used
- <varlistentry>
- <term> <varname>params</varname> </term>
- <listitem>
- <para>
-list
-</para>
- </listitem>
- </varlistentry>
--->
-
- <varlistentry>
- <term> <varname>priority</varname> </term>
- <listitem>
- <para>This is an integer. <command>bind10</command>
- will start the components with largest priority numbers first.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term> <varname>process</varname> </term>
- <listitem>
- <para>This is the filename of the executable to be started.
- If not defined, then <command>bind10</command> will
- use the component name instead.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term> <varname>special</varname> </term>
- <listitem>
- <para>
- This defines if the component is started a special, hardcoded
- way.
-<!--
-TODO: document this ... but maybe some of these will be removed
-once we get rid of some using switches for components?
-
-auth
-cfgmgr
-cmdctl
-msgq
-resolver
-sockcreator
-xfrin
--->
-
-</para>
- </listitem>
- </varlistentry>
-
- </variablelist>
-
-<!-- TODO: formating -->
- <para>
- The <varname>Boss</varname> configuration commands are:
- </para>
-<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
-
-<!-- TODO -->
-<!--
- <para>
- <command>drop_socket</command>
- This is an internal command and not exposed to the administrator.
- </para>
--->
-
-<!-- TODO -->
-<!--
- <para>
- <command>get_socket</command>
- This is an internal command and not exposed to the administrator.
- </para>
--->
-
- <para>
- <command>getstats</command> tells <command>bind10</command>
- to send its statistics data to the <command>b10-stats</command>
- daemon.
- This is an internal command and not exposed to the administrator.
-<!-- not defined in spec -->
- </para>
-
- <para>
- <command>ping</command> is used to check the connection with the
- <command>bind10</command> daemon.
- It returns the text <quote>pong</quote>.
- </para>
-
- <para>
- <command>show_processes</command> lists the current processes
- managed by <command>bind10</command>.
- The output is an array in JSON format containing the process
- ID, the name for each and the address name used on each message bus.
-<!-- TODO: what is name? -->
-<!-- TODO: change to JSON object format? -->
-<!-- TODO: ticket #1406 -->
- </para>
-
- <para>
- <command>shutdown</command> tells <command>bind10</command>
- to shutdown the BIND 10 servers.
- It will tell each process it manages to shutdown and, when
- complete, <command>bind10</command> will exit.
- </para>
-
- </refsect1>
-
- <refsect1>
- <title>STATISTICS DATA</title>
-
- <para>
- The statistics data collected by the <command>b10-stats</command>
- daemon for <quote>Boss</quote> include:
- </para>
-
- <variablelist>
-
- <varlistentry>
- <term>boot_time</term>
- <listitem><para>
- The date and time that the <command>bind10</command>
- process started.
- This is represented in ISO 8601 format.
- </para></listitem>
- </varlistentry>
-
- </variablelist>
-
- </refsect1>
-
- <refsect1>
- <title>FILES</title>
- <para><filename>sockcreator-XXXXXX/sockcreator</filename>
- —
- the Unix Domain socket located in a temporary file directory for
- <command>b10-sockcreator</command>
-<!-- <citerefentry><refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum></citerefentry> -->
- communication.
- </para>
</refsect1>
<refsect1>
<title>SEE ALSO</title>
<para>
<citerefentry>
- <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-zonemgr</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum>
+ <refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum>
</citerefentry>,
<citetitle>BIND 10 Guide</citetitle>.
</para>
</refsect1>
-<!-- <citerefentry>
- <refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>, -->
<refsect1 id='history'><title>HISTORY</title>
- <para>The development of <command>bind10</command>
- was started in October 2009.</para>
- </refsect1>
-
- <refsect1>
- <title>AUTHORS</title>
- <para>
- The <command>bind10</command>
- daemon was initially designed by Shane Kerr of ISC.
+ <para>The <command>bind10</command> script was added in February 2013.
</para>
</refsect1>
-</refentry><!--
- - Local variables:
- - mode: sgml
- - End:
--->
+
+</refentry>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
deleted file mode 100644
index 9414ed6..0000000
--- a/src/bin/bind10/bind10_messages.mes
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# No namespace declaration - these constants go in the global namespace
-# of the xfrin messages python module.
-
-% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
-The boss process is starting up and will now check if the message bus
-daemon is already running. If so, it will not be able to start, as it
-needs a dedicated message bus.
-
-% BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3
-The process terminated, but the bind10 boss didn't expect it to, which means
-it must have failed.
-
-% BIND10_COMPONENT_RESTART component %1 is about to restart
-The named component failed previously and we will try to restart it to provide
-as flawless service as possible, but it should be investigated what happened,
-as it could happen again.
-
-% BIND10_COMPONENT_START component %1 is starting
-The named component is about to be started by the boss process.
-
-% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
-An exception (mentioned in the message) happened during the startup of the
-named component. The component is not considered started and further actions
-will be taken about it.
-
-% BIND10_COMPONENT_STOP component %1 is being stopped
-A component is about to be asked to stop willingly by the boss.
-
-% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
-A component failed for some reason (see previous messages). It is either a core
-component or needed component that was just started. In any case, the system
-can't continue without it and will terminate.
-
-% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
-A debug message. This indicates that the configurator is building a plan
-how to change configuration from the older one to newer one. This does no
-real work yet, it just does the planning what needs to be done.
-
-% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
-There was an exception during some planned task. The plan will not continue and
-only some tasks of the plan were completed. The rest is aborted. The exception
-will be propagated.
-
-% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
-A different configuration of which components should be running is being
-installed. All components that are no longer needed will be stopped and
-newly introduced ones started. This happens at startup, when the configuration
-is read the first time, or when an operator changes configuration of the boss.
-
-% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
-A debug message. The configurator is about to execute a plan of actions it
-computed previously.
-
-% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
-The part that cares about starting and stopping the right component from the
-boss process is starting up. This happens only once at the startup of the
-boss process. It will start the basic set of processes now (the ones boss
-needs to read the configuration), the rest will be started after the
-configuration is known.
-
-% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
-The part that cares about starting and stopping processes in the boss is
-shutting down. All started components will be shut down now (more precisely,
-asked to terminate by their own, if they fail to comply, other parts of
-the boss process will try to force them).
-
-% BIND10_CONFIGURATOR_TASK performing task %1 on %2
-A debug message. The configurator is about to perform one task of the plan it
-is currently executing on the named component.
-
-% BIND10_CONNECTING_TO_CC_FAIL failed to connect to configuration/command channel; try -v to see output from msgq
-The boss process tried to connect to the communication channel for
-commands and configuration updates during initialization, but it
-failed. This is a fatal startup error, and process will soon
-terminate after some cleanup. There can be several reasons for the
-failure, but the most likely cause is that the msgq daemon failed to
-start, and the most likely cause of the msgq failure is that it
-doesn't have a permission to create a socket file for the
-communication. To confirm that, you can see debug messages from msgq
-by starting BIND 10 with the -v command line option. If it indicates
-permission problem for msgq, make sure the directory where the socket
-file is to be created is writable for the msgq process. Note that if
-you specify the -u option to change process users, the directory must
-be writable for that user.
-
-% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
-An error was encountered when the boss module specified
-statistics data which is invalid for the boss specification file.
-
-% BIND10_INVALID_USER invalid user: %1
-The boss process was started with the -u option, to drop root privileges
-and continue running as the specified user, but the user is unknown.
-
-% BIND10_KILLING_ALL_PROCESSES killing all started processes
-The boss module was not able to start every process it needed to start
-during startup, and will now kill the processes that did get started.
-
-% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
-A connection from one of the applications which requested a socket was
-closed. This means the application has terminated, so all the sockets it was
-using are now closed and bind10 process can release them as well, unless the
-same sockets are used by yet another application.
-
-% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
-There already appears to be a message bus daemon running. Either an
-old process was not shut down correctly, and needs to be killed, or
-another instance of BIND10, with the same msgq domain socket, is
-running, which needs to be stopped.
-
-% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
-While listening on the message bus channel for messages, it suddenly
-disappeared. The msgq daemon may have died. This might lead to an
-inconsistent state of the system, and BIND 10 will now shut down.
-
-% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
-An error occurred when the bind10 process was asked to send a socket file
-descriptor. The error is mentioned, most common reason is that the request
-is invalid and may not come from bind10 process at all.
-
-% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
-This indicates a process started previously terminated. The process id
-and component owning the process are indicated, as well as the exit code.
-This doesn't distinguish if the process was supposed to terminate or not.
-
-% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
-The boss process is starting up, and will now process the initial
-configuration, as received from the configuration manager.
-
-% BIND10_RECEIVED_COMMAND received command: %1
-The boss module received a command and shall now process it. The command
-is printed.
-
-% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
-The boss module received a configuration update and is going to apply
-it now. The new configuration is printed.
-
-% BIND10_RECEIVED_SIGNAL received signal %1
-The boss module received the given signal.
-
-% BIND10_RESTART_COMPONENT_SKIPPED Skipped restarting a component %1
-The boss module tried to restart a component after it failed (crashed)
-unexpectedly, but the boss then found that the component had been removed
-from its local configuration of components to run. This is an unusual
-situation but can happen if the administrator removes the component from
-the configuration after the component's crash and before the restart time.
-The boss module simply skipped restarting that module, and the whole system
-went back to the expected state (except that the crash itself is likely
-to be a bug).
-
-% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
-The given process has been restarted successfully, and is now running
-with the given process id.
-
-% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
-The given process has ended unexpectedly, and is now restarted.
-
-% BIND10_SELECT_ERROR error in select() call: %1
-There was a fatal error in the call to select(), used to see if a child
-process has ended or if there is a message on the message bus. This
-should not happen under normal circumstances and is considered fatal,
-so BIND 10 will now shut down. The specific error is printed.
-
-% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
-The boss module is sending a SIGKILL signal to the given process.
-
-% BIND10_SEND_SIGNAL_FAIL sending %1 to %2 (PID %3) failed: %4
-The boss module sent a signal (either SIGTERM or SIGKILL) to a process,
-but it failed due to some system level error. There are two major cases:
-the target process has already terminated but the boss module had sent
-the signal before it noticed the termination. In this case an error
-message should indicate something like "no such process". This can be
-safely ignored. The other case is that the boss module doesn't have
-the privilege to send a signal to the process. It can typically
-happen when the boss module started as a privileged process, spawned a
-subprocess, and then dropped the privilege. It includes the case for
-the socket creator when the boss process runs with the -u command line
-option. In this case, the boss module simply gives up to terminate
-the process explicitly because it's unlikely to succeed by keeping
-sending the signal. Although the socket creator is implemented so
-that it will terminate automatically when the boss process exits
-(and that should be the case for any other future process running with
-a higher privilege), but it's recommended to check if there's any
-remaining BIND 10 process if this message is logged. For all other
-cases, the boss module will keep sending the signal until it confirms
-all child processes terminate. Although unlikely, this could prevent
-the boss module from exiting, just keeping sending the signals. So,
-again, it's advisable to check if it really terminates when this
-message is logged.
-
-% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
-The boss module is sending a SIGTERM signal to the given process.
-
-% BIND10_SETGID setting GID to %1
-The boss switches the process group ID to the given value. This happens
-when BIND 10 starts with the -u option, and the group ID will be set to
-that of the specified user.
-
-% BIND10_SETUID setting UID to %1
-The boss switches the user it runs as to the given UID.
-
-% BIND10_SHUTDOWN stopping the server
-The boss process received a command or signal telling it to shut down.
-It will send a shutdown command to each process. The processes that do
-not shut down will then receive a SIGTERM signal. If that doesn't work,
-it shall send SIGKILL signals to the processes still alive.
-
-% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
-All child processes have been stopped, and the boss process will now
-stop itself.
-
-% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
-The socket creator reported an error when creating a socket. But the function
-which failed is unknown (not one of 'S' for socket or 'B' for bind).
-
-% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
-The boss requested a socket from the creator, but the answer is unknown. This
-looks like a programmer error.
-
-% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
-There should be more data from the socket creator, but it closed the socket.
-It probably crashed.
-
-% BIND10_SOCKCREATOR_INIT initializing socket creator parser
-The boss module initializes routines for parsing the socket creator
-protocol.
-
-% BIND10_SOCKCREATOR_KILL killing the socket creator
-The socket creator is being terminated the aggressive way, by sending it
-sigkill. This should not happen usually.
-
-% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
-The boss module sends a request to terminate to the socket creator.
-
-% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
-Either sending or receiving data from the socket creator failed with the given
-error. The creator probably crashed or some serious OS-level problem happened,
-as the communication happens only on local host.
-
-% BIND10_SOCKET_CREATED successfully created socket %1
-The socket creator successfully created and sent a requested socket, it has
-the given file number.
-
-% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
-The socket creator failed to create the requested socket. It failed on the
-indicated OS API function with given error.
-
-% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
-The boss forwards a request for a socket to the socket creator.
-
-% BIND10_STARTED_CC started configuration/command session
-Debug message given when BIND 10 has successfully started the object that
-handles configuration and commands.
-
-% BIND10_STARTED_PROCESS started %1
-The given process has successfully been started.
-
-% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
-The given process has successfully been started, and has the given PID.
-
-% BIND10_STARTING starting BIND10: %1
-Informational message on startup that shows the full version.
-
-% BIND10_STARTING_CC starting configuration/command session
-Informational message given when BIND 10 is starting the session object
-that handles configuration and commands.
-
-% BIND10_STARTING_PROCESS starting process %1
-The boss module is starting the given process.
-
-% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
-The boss module is starting the given process, which will listen on the
-given port number.
-
-% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
-The boss module is starting the given process, which will listen on the
-given address and port number (written as <address>#<port>).
-
-% BIND10_STARTUP_COMPLETE BIND 10 started
-All modules have been successfully started, and BIND 10 is now running.
-
-% BIND10_STARTUP_ERROR error during startup: %1
-There was a fatal error when BIND10 was trying to start. The error is
-shown, and BIND10 will now shut down.
-
-% BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1
-During the startup process, a number of messages are exchanged between the
-Boss process and the processes it starts. This error is output when a
-message received by the Boss process is recognised as being of the
-correct format but is unexpected. It may be that processes are starting
-out of sequence.
-
-% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
-During the startup process, a number of messages are exchanged between the
-Boss process and the processes it starts. This error is output when a
-message received by the Boss process is not recognised.
-
-% BIND10_STOP_PROCESS asking %1 to shut down
-The boss module is sending a shutdown command to the given module over
-the message channel.
-
-% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
-An unknown child process has exited. The PID is printed, but no further
-action will be taken by the boss process.
-
-% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
-The configuration manager process is so critical to operation of BIND 10
-that after starting it, the Boss module will wait for it to initialize
-itself before continuing. This debug message is produced during the
-wait and may be output zero or more times depending on how long it takes
-the configuration manager to start up. The total length of time Boss
-will wait for the configuration manager before reporting an error is
-set with the command line --wait switch, which has a default value of
-ten seconds.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
deleted file mode 100755
index 9f41804..0000000
--- a/src/bin/bind10/bind10_src.py.in
+++ /dev/null
@@ -1,1318 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-import copy
-
-from bind10_config import LIBEXECPATH
-import bind10_config
-import isc.cc
-import isc.util.process
-import isc.net.parse
-import isc.log
-from isc.log_messages.bind10_messages import *
-import isc.bind10.component
-import isc.bind10.special_component
-import isc.bind10.socket_cache
-import libutil_io_python
-import tempfile
-
-isc.log.init("b10-boss", buffer=True)
-logger = isc.log.Logger("boss")
-
-# Pending system-wide debug level definitions, the ones we
-# use here are hardcoded for now
-DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
-DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
-
-# Messages sent over the unix domain socket to indicate if it is followed by a real socket
-CREATOR_SOCKET_OK = b"1\n"
-CREATOR_SOCKET_UNAVAILABLE = b"0\n"
-
-# RCodes of known exceptions for the get_token command
-CREATOR_SOCKET_ERROR = 2
-CREATOR_SHARE_ERROR = 3
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for boot_time of Boss
-_BASETIME = time.gmtime()
-
-# Detailed error message commonly used on startup failure, possibly due to
-# permission issue regarding log lock file. We dump verbose message because
-# it may not be clear exactly what to do if it simply says
-# "failed to open <filename>: permission denied"
-NOTE_ON_LOCK_FILE = """\
-TIP: if this is about permission error for a lock file, check if the directory
-of the file is writable for the user of the bind10 process; often you need
-to start bind10 as a super user. Also, if you specify the -u option to
-change the user and group, the directory must be writable for the group,
-and the created lock file must be writable for that user. Finally, make sure
-the lock file is not left in the directory before restarting.
-"""
-
-class ProcessInfoError(Exception): pass
-
-class ChangeUserError(Exception):
- '''Exception raised when setuid/setgid fails.
-
- When raised, it's expected to be propagated via underlying component
- management modules to the top level so that it will help provide useful
- fatal error message.
-
- '''
- pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everthing down by
- # other means).
- os.setpgrp()
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = copy.deepcopy(os.environ)
- spawn_env.update(self.env)
- spawn_env['PATH'] = LIBEXECPATH + ':' + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class ProcessStartError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, clear_config=False,
- verbose=False, nokill=False, setuid=None, setgid=None,
- username=None, cmdctl_port=None, wait_time=10):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed through to config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specify on which port it
- should listen.
-
- wait_time controls the amount of time (in seconds) that Boss waits
- for selected processes to initialize before continuing with the
- initialization. Currently this is only the configuration manager.
- """
- self.cc_session = None
- self.ccs = None
- self.curproc = None
- self.msgq_socket_file = msgq_socket_file
- self.component_config = {}
- # Some time in future, it may happen that a single component has
- # multple processes (like a pipeline-like component). If so happens,
- # name "components" may be inapropriate. But as the code isn't probably
- # completely ready for it, we leave it at components for now. We also
- # want to support multiple instances of a single component. If it turns
- # out that we'll have a single component with multiple same processes
- # or if we start multiple components with the same configuration (we do
- # this now, but it might change) is an open question.
- self.components = {}
- # Simply list of components that died and need to wait for a
- # restart. Components manage their own restart schedule now
- self.components_to_restart = []
- self.runnable = False
- self.__uid = setuid
- self.__gid = setgid
- self.username = username
- self.verbose = verbose
- self.nokill = nokill
- self.data_path = data_path
- self.config_filename = config_filename
- self.clear_config = clear_config
- self.cmdctl_port = cmdctl_port
- self.wait_time = wait_time
- self.msgq_timeout = 5
-
- # _run_under_unittests is only meant to be used when testing. It
- # bypasses execution of some code to help with testing.
- self._run_under_unittests = False
-
- self._component_configurator = isc.bind10.component.Configurator(self,
- isc.bind10.special_component.get_specials())
- # The priorities here make them start in the correct order. First
- # the socket creator (which would drop root privileges by then),
- # then message queue and after that the config manager (which uses
- # the config manager)
- self.__core_components = {
- 'sockcreator': {
- 'kind': 'core',
- 'special': 'sockcreator',
- 'priority': 200
- },
- 'msgq': {
- 'kind': 'core',
- 'special': 'msgq',
- 'priority': 199
- },
- 'cfgmgr': {
- 'kind': 'core',
- 'special': 'cfgmgr',
- 'priority': 198
- }
- }
- self.__started = False
- self.exitcode = 0
-
- # If -v was set, enable full debug logging.
- if self.verbose:
- logger.set_severity("DEBUG", 99)
- # This is set in init_socket_srv
- self._socket_path = None
- self._socket_cache = None
- self._tmpdir = None
- self._srv_socket = None
- self._unix_sockets = {}
-
- def __propagate_component_config(self, config):
- comps = dict(config)
- # Fill in the core components, so they stay alive
- for comp in self.__core_components:
- if comp in comps:
- raise Exception(comp + " is core component managed by " +
- "bind10 boss, do not set it")
- comps[comp] = self.__core_components[comp]
- # Update the configuration
- self._component_configurator.reconfigure(comps)
-
- def change_user(self):
- '''Change the user and group to those specified on construction.
-
- This method is expected to be called by a component on initial
- startup when the system is ready to switch the user and group
- (i.e., once all components that need the privilege of the original
- user have started).
- '''
- try:
- if self.__gid is not None:
- logger.info(BIND10_SETGID, self.__gid)
- posix.setgid(self.__gid)
- except Exception as ex:
- raise ChangeUserError('failed to change group: ' + str(ex))
-
- try:
- if self.__uid is not None:
- posix.setuid(self.__uid)
- # We use one-shot logger after setuid here. This will
- # detect any permission issue regarding logging due to the
- # result of setuid at the earliest opportunity.
- isc.log.Logger("boss").info(BIND10_SETUID, self.__uid)
- except Exception as ex:
- raise ChangeUserError('failed to change user: ' + str(ex))
-
- def config_handler(self, new_config):
- # If this is initial update, don't do anything now, leave it to startup
- if not self.runnable:
- return
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
- new_config)
- try:
- if 'components' in new_config:
- self.__propagate_component_config(new_config['components'])
- return isc.config.ccsession.create_answer(0)
- except Exception as e:
- return isc.config.ccsession.create_answer(1, str(e))
-
- def get_processes(self):
- pids = list(self.components.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.components[pid].name(),
- self.components[pid].address()])
- return process_list
-
- def _get_stats_data(self):
- return { 'boot_time':
- time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }
-
- def command_handler(self, command, args):
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "getstats":
- answer = isc.config.ccsession.create_answer(
- 0, self._get_stats_data())
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- elif command == "get_socket":
- answer = self._get_socket(args)
- elif command == "drop_socket":
- if "token" not in args:
- answer = isc.config.ccsession. \
- create_answer(1, "Missing token parameter")
- else:
- try:
- self._socket_cache.drop_socket(args["token"])
- answer = isc.config.ccsession.create_answer(0)
- except Exception as e:
- answer = isc.config.ccsession.create_answer(1, str(e))
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_components(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- logger.info(BIND10_KILLING_ALL_PROCESSES)
- self.__kill_children(True)
- self.components = {}
-
- def _read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- This means the list of components we should start now.
-
- This could easily be combined into start_all_processes, but
- it stays because of historical reasons and because the tests
- replace the method sometimes.
- """
- logger.info(BIND10_READING_BOSS_CONFIGURATION)
-
- config_data = self.ccs.get_full_config()
- self.__propagate_component_config(config_data['components'])
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
- Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if port is None and address is None:
- logger.info(BIND10_STARTING_PROCESS, self.curproc)
- elif address is None:
- logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
- port)
- else:
- logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
- self.curproc, address, port)
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with starting_message(), this ensures a consistent
- format.
- """
- if pid is None:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
- else:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
-
- def process_running(self, msg, who):
- """
- Some processes return a message to the Boss after they have
- started to indicate that they are running. The form of the
- message is a dictionary with contents {"running:", "<process>"}.
- This method checks the passed message and returns True if the
- "who" process is contained in the message (so is presumably
- running). It returns False for all other conditions and will
- log an error if appropriate.
- """
- if msg is not None:
- try:
- if msg["running"] == who:
- return True
- else:
- logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
- except:
- logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
-
- return False
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def _make_process_info(self, name, args, env,
- dev_null_stdout=False, dev_null_stderr=False):
- """
- Wrapper around ProcessInfo(), useful to override
- ProcessInfo() creation during testing.
- """
- return ProcessInfo(name, args, env, dev_null_stdout, dev_null_stderr)
-
- def start_msgq(self):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- msgq_proc = self._make_process_info("b10-msgq", ["b10-msgq"],
- self.c_channel_env,
- True, not self.verbose)
- msgq_proc.spawn()
- self.log_started(msgq_proc.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we are run under unittests, break
- if self._run_under_unittests:
- break
-
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > self.msgq_timeout:
- if msgq_proc.process:
- msgq_proc.process.kill()
- logger.error(BIND10_CONNECTING_TO_CC_FAIL)
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- # Subscribe to the message queue. The only messages we expect to receive
- # on this channel are once relating to process startup.
- if self.cc_session is not None:
- self.cc_session.group_subscribe("Boss")
-
- return msgq_proc
-
- def start_cfgmgr(self):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- if self.clear_config:
- args.append("--clear-config")
- bind_cfgd = self._make_process_info("b10-cfgmgr", args,
- self.c_channel_env)
- bind_cfgd.spawn()
- self.log_started(bind_cfgd.pid)
-
- # Wait for the configuration manager to start up as
- # subsequent initialization cannot proceed without it. The
- # time to wait can be set on the command line.
- time_remaining = self.wait_time
- msg, env = self.cc_session.group_recvmsg()
- while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
- logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
- time.sleep(1)
- time_remaining = time_remaining - 1
- msg, env = self.cc_session.group_recvmsg()
-
- if not self.process_running(msg, "ConfigManager"):
- raise ProcessStartError("Configuration manager process has not started")
-
- return bind_cfgd
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
-
- With regards to logging, note that as the CC session is not a
- process, the log_starting/log_started methods are not used.
- """
- logger.info(BIND10_STARTING_CC)
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- socket_file = self.msgq_socket_file)
- self.ccs.start()
- logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = self._make_process_info(name, args, c_channel_env)
- newproc.spawn()
- self.log_started(newproc.pid)
- return newproc
-
- def register_process(self, pid, component):
- """
- Put another process into boss to watch over it. When the process
- dies, the component.failed() is called with the exit code.
-
- It is expected the info is a isc.bind10.component.BaseComponent
- subclass (or anything having the same interface).
- """
- self.components[pid] = component
-
- def start_simple(self, name):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
-
- The port and address arguments are for log messages only.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- return self.start_process(name, args, self.c_channel_env)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- return self.start_process("b10-auth", authargs, self.c_channel_env)
-
- def start_resolver(self):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- return self.start_process("b10-resolver", resargs, self.c_channel_env)
-
- def start_cmdctl(self):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- if self.verbose:
- args.append("-v")
- return self.start_process("b10-cmdctl", args, self.c_channel_env,
- self.cmdctl_port)
-
- def start_all_components(self):
- """
- Starts up all the components. Any exception generated during the
- starting of the components is handled by the caller.
- """
- # Start the real core (sockcreator, msgq, cfgmgr)
- self._component_configurator.startup(self.__core_components)
-
- # Connect to the msgq. This is not a process, so it's not handled
- # inside the configurator.
- self.start_ccsession(self.c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started. Note that the logging
- # configuration may override the "-v" switch set on the command line.
- self._read_bind10_config()
-
- # TODO: Return the dropping of privileges
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise an string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
- return "b10-msgq already running, or socket file not cleaned , cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all components. If any one fails to start, kill all started
- # components and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_components()
- except ChangeUserError as e:
- self.kill_started_components()
- return str(e) + '; ' + NOTE_ON_LOCK_FILE.replace('\n', ' ')
- except Exception as e:
- self.kill_started_components()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- self.__started = True
- return None
-
- def stop_process(self, process, recipient, pid):
- """
- Stop the given process, friendly-like. The process is the name it has
- (in logs, etc), the recipient is the address on msgq. The pid is the
- pid of the process (if we have multiple processes of the same name,
- it might want to choose if it is for this one).
- """
- logger.info(BIND10_STOP_PROCESS, process)
- self.cc_session.group_sendmsg(isc.config.ccsession.
- create_command('shutdown', {'pid': pid}),
- recipient, recipient)
-
- def component_shutdown(self, exitcode=0):
- """
- Stop the Boss instance from a components' request. The exitcode
- indicates the desired exit code.
-
- If we did not start yet, it raises an exception, which is meant
- to propagate through the component and configurator to the startup
- routine and abort the startup immediately. If it is started up already,
- we just mark it so we terminate soon.
-
- It does set the exit code in both cases.
- """
- self.exitcode = exitcode
- if not self.__started:
- raise Exception("Component failed during startup");
- else:
- self.runnable = False
-
- def shutdown(self):
- """Stop the BoB instance."""
- logger.info(BIND10_SHUTDOWN)
- # If ccsession is still there, inform rest of the system this module
- # is stopping. Since everything will be stopped shortly, this is not
- # really necessary, but this is done to reflect that boss is also
- # 'just' a module.
- self.ccs.send_stopping()
-
- # try using the BIND 10 request to stop
- try:
- self._component_configurator.shutdown()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometime it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
-
- # Send TERM and KILL signals to modules if we're not prevented
- # from doing so
- if not self.nokill:
- # next try sending a SIGTERM
- self.__kill_children(False)
- # finally, send SIGKILL (unmaskable termination) until everybody
- # dies
- while self.components:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- self.__kill_children(True)
- logger.info(BIND10_SHUTDOWN_COMPLETE)
-
- def __kill_children(self, forceful):
- '''Terminate remaining subprocesses by sending a signal.
-
- The forceful paramter will be passed Component.kill().
- This is a dedicated subroutine of shutdown(), just to unify two
- similar cases.
-
- '''
- logmsg = BIND10_SEND_SIGKILL if forceful else BIND10_SEND_SIGTERM
- # We need to make a copy of values as the components may be modified
- # in the loop.
- for component in list(self.components.values()):
- logger.info(logmsg, component.name(), component.pid())
- try:
- component.kill(forceful)
- except OSError as ex:
- # If kill() failed due to EPERM, it doesn't make sense to
- # keep trying, so we just log the fact and forget that
- # component. Ignore other OSErrors (usually ESRCH because
- # the child finally exited)
- signame = "SIGKILL" if forceful else "SIGTERM"
- logger.info(BIND10_SEND_SIGNAL_FAIL, signame,
- component.name(), component.pid(), ex)
- if ex.errno == errno.EPERM:
- del self.components[component.pid()]
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD:
- break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0:
- break
- if pid in self.components:
- # One of the components we know about. Get information on it.
- component = self.components.pop(pid)
- logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
- exit_status)
- if component.is_running() and self.runnable:
- # Tell it it failed. But only if it matters (we are
- # not shutting down and the component considers itself
- # to be running.
- component_restarted = component.failed(exit_status);
- # if the process wants to be restarted, but not just yet,
- # it returns False
- if not component_restarted:
- self.components_to_restart.append(component)
- else:
- logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
-
- """
- if not self.runnable:
- return 0
- still_dead = []
- # keep track of the first time we need to check this queue again,
- # if at all
- next_restart_time = None
- now = time.time()
- for component in self.components_to_restart:
- # If the component was removed from the configurator between since
- # scheduled to restart, just ignore it. The object will just be
- # dropped here.
- if not self._component_configurator.has_component(component):
- logger.info(BIND10_RESTART_COMPONENT_SKIPPED, component.name())
- elif not component.restart(now):
- still_dead.append(component)
- if next_restart_time is None or\
- next_restart_time > component.get_restart_time():
- next_restart_time = component.get_restart_time()
- self.components_to_restart = still_dead
-
- return next_restart_time
-
- def _get_socket(self, args):
- """
- Implementation of the get_socket CC command. It asks the cache
- to provide the token and sends the information back.
- """
- try:
- try:
- addr = isc.net.parse.addr_parse(args['address'])
- port = isc.net.parse.port_parse(args['port'])
- protocol = args['protocol']
- if protocol not in ['UDP', 'TCP']:
- raise ValueError("Protocol must be either UDP or TCP")
- share_mode = args['share_mode']
- if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
- raise ValueError("Share mode must be one of ANY, SAMEAPP" +
- " or NO")
- share_name = args['share_name']
- except KeyError as ke:
- return \
- isc.config.ccsession.create_answer(1,
- "Missing parameter " +
- str(ke))
-
- # FIXME: This call contains blocking IPC. It is expected to be
- # short, but if it turns out to be problem, we'll need to do
- # something about it.
- token = self._socket_cache.get_token(protocol, addr, port,
- share_mode, share_name)
- return isc.config.ccsession.create_answer(0, {
- 'token': token,
- 'path': self._socket_path
- })
- except isc.bind10.socket_cache.SocketError as e:
- return isc.config.ccsession.create_answer(CREATOR_SOCKET_ERROR,
- str(e))
- except isc.bind10.socket_cache.ShareError as e:
- return isc.config.ccsession.create_answer(CREATOR_SHARE_ERROR,
- str(e))
- except Exception as e:
- return isc.config.ccsession.create_answer(1, str(e))
-
- def socket_request_handler(self, token, unix_socket):
- """
- This function handles a token that comes over a unix_domain socket.
- The function looks into the _socket_cache and sends the socket
- identified by the token back over the unix_socket.
- """
- try:
- token = str(token, 'ASCII') # Convert from bytes to str
- fd = self._socket_cache.get_socket(token, unix_socket.fileno())
- # FIXME: These two calls are blocking in their nature. An OS-level
- # buffer is likely to be large enough to hold all these data, but
- # if it wasn't and the remote application got stuck, we would have
- # a problem. If there appear such problems, we should do something
- # about it.
- unix_socket.sendall(CREATOR_SOCKET_OK)
- libutil_io_python.send_fd(unix_socket.fileno(), fd)
- except Exception as e:
- logger.info(BIND10_NO_SOCKET, token, e)
- unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
-
- def socket_consumer_dead(self, unix_socket):
- """
- This function handles when a unix_socket closes. This means all
- sockets sent to it are to be considered closed. This function signals
- so to the _socket_cache.
- """
- logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
- try:
- self._socket_cache.drop_application(unix_socket.fileno())
- except ValueError:
- # This means the application holds no sockets. It's harmless, as it
- # can happen in real life - for example, it requests a socket, but
- # get_socket doesn't find it, so the application dies. It should be
- # rare, though.
- pass
-
- def set_creator(self, creator):
- """
- Registers a socket creator into the boss. The socket creator is not
- used directly, but through a cache. The cache is created in this
- method.
-
- If called more than once, it raises a ValueError.
- """
- if self._socket_cache is not None:
- raise ValueError("A creator was inserted previously")
- self._socket_cache = isc.bind10.socket_cache.Cache(creator)
-
- def init_socket_srv(self):
- """
- Creates and listens on a unix-domain socket to be able to send out
- the sockets.
-
- This method should be called after switching user, or the switched
- applications won't be able to access the socket.
- """
- self._srv_socket = socket.socket(socket.AF_UNIX)
- # We create a temporary directory somewhere safe and unique, to avoid
- # the need to find the place ourself or bother users. Also, this
- # secures the socket on some platforms, as it creates a private
- # directory.
- self._tmpdir = tempfile.mkdtemp(prefix='sockcreator-')
- # Get the name
- self._socket_path = os.path.join(self._tmpdir, "sockcreator")
- # And bind the socket to the name
- self._srv_socket.bind(self._socket_path)
- self._srv_socket.listen(5)
-
- def remove_socket_srv(self):
- """
- Closes and removes the listening socket and the directory where it
- lives, as we created both.
-
- It does nothing if the _srv_socket is not set (eg. it was not yet
- initialized).
- """
- if self._srv_socket is not None:
- self._srv_socket.close()
- if os.path.exists(self._socket_path):
- os.remove(self._socket_path)
- if os.path.isdir(self._tmpdir):
- os.rmdir(self._tmpdir)
-
- def _srv_accept(self):
- """
- Accept a socket from the unix domain socket server and put it to the
- others we care about.
- """
- (socket, conn) = self._srv_socket.accept()
- self._unix_sockets[socket.fileno()] = (socket, b'')
-
- def _socket_data(self, socket_fileno):
- """
- This is called when a socket identified by the socket_fileno needs
- attention. We try to read data from there. If it is closed, we remove
- it.
- """
- (sock, previous) = self._unix_sockets[socket_fileno]
- while True:
- try:
- data = sock.recv(1, socket.MSG_DONTWAIT)
- except socket.error as se:
- # These two might be different on some systems
- if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
- # No more data now. Oh, well, just store what we have.
- self._unix_sockets[socket_fileno] = (sock, previous)
- return
- else:
- data = b'' # Pretend it got closed
- if len(data) == 0: # The socket got to its end
- del self._unix_sockets[socket_fileno]
- self.socket_consumer_dead(sock)
- sock.close()
- return
- else:
- if data == b"\n":
- # Handle this token and clear it
- self.socket_request_handler(previous, sock)
- previous = b''
- else:
- previous += data
-
- def run(self, wakeup_fd):
- """
- The main loop, waiting for sockets, commands and dead processes.
- Runs as long as the runnable is true.
-
- The wakeup_fd descriptor is the read end of pipe where CHLD signal
- handler writes.
- """
- ccs_fd = self.ccs.get_socket().fileno()
- while self.runnable:
- # clean up any processes that exited
- self.reap_children()
- next_restart = self.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = \
- select.select([wakeup_fd, ccs_fd,
- self._srv_socket.fileno()] +
- list(self._unix_sockets.keys()), [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- logger.fatal(BIND10_SELECT_ERROR, err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- self.ccs.check_command()
- except isc.cc.session.ProtocolError:
- logger.fatal(BIND10_MSGQ_DISAPPEARED)
- self.runnable = False
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
- elif fd == self._srv_socket.fileno():
- self._srv_accept()
- elif fd in self._unix_sockets:
- self._socket_data(fd)
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-i", "--no-kill", action="store_true", dest="nokill",
- default=False, help="do not send SIGTERM and SIGKILL signals to modules during shutdown")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("--clear-config", action="store_true",
- dest="clear_config", default=False,
- help="Create backup of the configuration file and " +
- "start with a clean configuration")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("-w", "--wait", dest="wait_time", type="int",
- default=10, help="Time (in seconds) to wait for config manager to start up")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
- created by dump_pid(). The specified file may or may not exist; if it
- doesn't this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno is not errno.ENOENT:
- raise
-
-def remove_lock_files():
- """
- Remove various lock files which were created by code such as in the
- logger. This function should be called after BIND 10 shutdown.
- """
-
- lockfiles = ["logger_lockfile"]
-
- lpath = bind10_config.DATA_PATH
- if "B10_FROM_BUILD" in os.environ:
- lpath = os.environ["B10_FROM_BUILD"]
- if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
- lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
- if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ:
- lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"]
-
- for f in lockfiles:
- fname = lpath + '/' + f
- if os.path.isfile(fname):
- try:
- os.unlink(fname)
- except OSError as e:
- # We catch and ignore permission related error on unlink.
- # This can happen if bind10 started with -u, created a lock
- # file as a privileged user, but the directory is not writable
- # for the changed user. This setup will cause immediate
- # start failure, and we leave verbose error message including
- # the leftover lock file, so it should be acceptable to ignore
- # it (note that it doesn't make sense to log this event at
- # this point)
- if e.errno != errno.EPERM and e.errno != errno.EACCES:
- raise
-
- return
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Announce startup. Making this is the first log message.
- try:
- logger.info(BIND10_STARTING, VERSION)
- except RuntimeError as e:
- sys.stderr.write('ERROR: failed to write the initial log: %s\n' %
- str(e))
- sys.stderr.write(NOTE_ON_LOCK_FILE)
- sys.exit(1)
-
- # Check user ID.
- setuid = None
- setgid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- setgid = pw_ent.pw_gid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- setgid = pw_ent.pw_gid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- logger.fatal(BIND10_INVALID_USER, options.user)
- sys.exit(1)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- try:
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.clear_config,
- options.verbose, options.nokill,
- setuid, setgid, username, options.cmdctl_port,
- options.wait_time)
- startup_result = boss_of_bind.startup()
- if startup_result:
- logger.fatal(BIND10_STARTUP_ERROR, startup_result)
- sys.exit(1)
- boss_of_bind.init_socket_srv()
- logger.info(BIND10_STARTUP_COMPLETE)
- dump_pid(options.pid_file)
-
- # Let it run
- boss_of_bind.run(wakeup_pipe[0])
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- finally:
- # Clean up the filesystem
- unlink_pid_file(options.pid_file)
- remove_lock_files()
- if boss_of_bind is not None:
- boss_of_bind.remove_socket_srv()
- sys.exit(boss_of_bind.exitcode)
-
-if __name__ == "__main__":
- main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
deleted file mode 100644
index 442d46f..0000000
--- a/src/bin/bind10/bob.spec
+++ /dev/null
@@ -1,92 +0,0 @@
-{
- "module_spec": {
- "module_name": "Boss",
- "module_description": "Master process",
- "config_data": [
- {
- "item_name": "components",
- "item_type": "named_set",
- "item_optional": false,
- "item_default": {
- "b10-stats": { "address": "Stats", "kind": "dispensable" },
- "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
- },
- "named_set_item_spec": {
- "item_name": "component",
- "item_type": "map",
- "item_optional": false,
- "item_default": { },
- "map_item_spec": [
- {
- "item_name": "special",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "process",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "kind",
- "item_optional": false,
- "item_type": "string",
- "item_default": "dispensable"
- },
- {
- "item_name": "address",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "params",
- "item_optional": true,
- "item_type": "list",
- "list_item_spec": {
- "item_name": "param",
- "item_optional": false,
- "item_type": "string",
- "item_default": ""
- }
- },
- {
- "item_name": "priority",
- "item_optional": true,
- "item_type": "integer"
- }
- ]
- }
- }
- ],
- "commands": [
- {
- "command_name": "shutdown",
- "command_description": "Shut down BIND 10",
- "command_args": []
- },
- {
- "command_name": "ping",
- "command_description": "Ping the boss process",
- "command_args": []
- },
- {
- "command_name": "show_processes",
- "command_description": "List the running BIND 10 processes",
- "command_args": []
- }
- ],
- "statistics": [
- {
- "item_name": "boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Boot time",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- }
- ]
- }
-}
-
-
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
index c23d907..d3e67f4 100644
--- a/src/bin/bind10/creatorapi.txt
+++ b/src/bin/bind10/creatorapi.txt
@@ -1,7 +1,7 @@
Socket creator API
==================
-This API is between Boss and other modules to allow them requesting of sockets.
+This API is between Init and other modules to allow them requesting of sockets.
For simplicity, we will use the socket creator for all (even non-privileged)
ports for now, but we should have some function where we can abstract it later.
@@ -25,12 +25,12 @@ It seems we are stuck with current msgq for a while and there's a chance the
new replacement will not be able to send sockets inbound. So, we need another
channel.
-The boss will create a unix-domain socket and listen on it. When something
+b10-init will create a unix-domain socket and listen on it. When something
requests a socket over the command channel and the socket is created, some kind
of token is returned to the application (which will represent the future
socket). The application then connects to the unix-domain socket, sends the
-token over the connection (so Boss will know which socket to send there, in case
-multiple applications ask for sockets simultaneously) and Boss sends the socket
+token over the connection (so Init will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Init sends the socket
in return.
In theory, we could send the requests directly over the unix-domain
@@ -48,8 +48,8 @@ socket, but it has two disadvantages:
Caching of sockets
------------------
-To allow sending the same socket to multiple application, the Boss process will
-hold a cache. Each socket that is created and sent is kept open in Boss and
+To allow sending the same socket to multiple applications, the Init process will
+hold a cache. Each socket that is created and sent is kept open in Init and
preserved there as well. A reference count is kept with each of them.
When another application asks for the same socket, it is simply sent from the
@@ -60,14 +60,14 @@ command channel), the reference count can be decreased without problems. But
when the application terminates or crashes, we need to decrease it as well.
There's a problem, since we don't know which command channel connection (eg.
lname) belongs to which PID. Furthermore, the applications don't need to be
-started by boss.
+started by b10-init.
There are two possibilities:
* Let the msgq send messages about disconnected clients (eg. group message to
some name). This one is better if we want to migrate to dbus, since dbus
already has this capability as well as sending the sockets inbound (at least it
seems so on unix) and we could get rid of the unix-domain socket completely.
-* Keep the unix-domain connections open forever. Boss can remember which socket
+* Keep the unix-domain connections open forever. Init can remember which socket
was sent to which connection and when the connection closes (because the
application crashed), it can drop all the references on the sockets. This
seems easier to implement.
@@ -75,12 +75,12 @@ There are two possibilities:
The commands
------------
* Command to release a socket. This one would have single parameter, the token
- used to get the socket. After this, boss would decrease its reference count
- and if it drops to zero, close its own copy of the socket. This should be used
- when the module stops using the socket (and after closes it). The
- library could remember the file-descriptor to token mapping (for
- common applications that don't request the same socket multiple
- times in parallel).
+ used to get the socket. After this, b10-init would decrease its reference
+ count and if it drops to zero, close its own copy of the socket. This
+ should be used when the module stops using the socket (and after closes
+ it). The library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple times in
+ parallel).
* Command to request a socket. It would have parameters to specify which socket
(IP address, address family, port) and how to allow sharing. Sharing would be
one of:
diff --git a/src/bin/bind10/init.py.in b/src/bin/bind10/init.py.in
new file mode 100755
index 0000000..f47de31
--- /dev/null
+++ b/src/bin/bind10/init.py.in
@@ -0,0 +1,1321 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the b10-init program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the Init class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
+ "/src/bin/bind10/init.spec"
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/init.spec"\
+ .replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+import copy
+
+from bind10_config import LIBEXECPATH
+import bind10_config
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+import isc.config
+from isc.log_messages.init_messages import *
+import isc.bind10.component
+import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
+
+isc.log.init("b10-init", buffer=True)
+logger = isc.log.Logger("init")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
+# Messages sent over the unix domain socket to indicate if it is followed by a real socket
+CREATOR_SOCKET_OK = b"1\n"
+CREATOR_SOCKET_UNAVAILABLE = b"0\n"
+
+# RCodes of known exceptions for the get_token command
+CREATOR_SOCKET_ERROR = 2
+CREATOR_SHARE_ERROR = 3
+
+# Assign this process some longer name
+isc.util.process.rename()
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for boot_time of Init
+_BASETIME = time.gmtime()
+
+# Detailed error message commonly used on startup failure, possibly due to
+# permission issue regarding log lock file. We dump verbose message because
+# it may not be clear exactly what to do if it simply says
+# "failed to open <filename>: permission denied"
+NOTE_ON_LOCK_FILE = """\
+TIP: if this is about permission error for a lock file, check if the directory
+of the file is writable for the user of the bind10 process; often you need
+to start bind10 as a super user. Also, if you specify the -u option to
+change the user and group, the directory must be writable for the group,
+and the created lock file must be writable for that user. Finally, make sure
+the lock file is not left in the directory before restarting.
+"""
+
+class ProcessInfoError(Exception): pass
+
+class ChangeUserError(Exception):
+ '''Exception raised when setuid/setgid fails.
+
+ When raised, it's expected to be propagated via underlying component
+ management modules to the top level so that it will help provide useful
+ fatal error message.
+
+ '''
+ pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (b10-init will shut everything down by
+ # other means).
+ os.setpgrp()
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the b10-init process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = copy.deepcopy(os.environ)
+ spawn_env.update(self.env)
+ spawn_env['PATH'] = LIBEXECPATH + ':' + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class ProcessStartError(Exception): pass
+
+class Init:
+ """Init of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, clear_config=False,
+ verbose=False, nokill=False, setuid=None, setgid=None,
+ username=None, cmdctl_port=None, wait_time=10):
+ """
+ Initialize the Init of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then b10-init reports
+ what it is doing.
+
+ Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specify on which port it
+ should listen.
+
+ wait_time controls the amount of time (in seconds) that Init waits
+ for selected processes to initialize before continuing with the
+ initialization. Currently this is only the configuration manager.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.curproc = None
+ self.msgq_socket_file = msgq_socket_file
+ self.component_config = {}
+ # Some time in future, it may happen that a single component has
+ # multiple processes (like a pipeline-like component). If so happens,
+ # name "components" may be inappropriate. But as the code isn't probably
+ # completely ready for it, we leave it at components for now. We also
+ # want to support multiple instances of a single component. If it turns
+ # out that we'll have a single component with multiple same processes
+ # or if we start multiple components with the same configuration (we do
+ # this now, but it might change) is an open question.
+ self.components = {}
+ # Simply list of components that died and need to wait for a
+ # restart. Components manage their own restart schedule now
+ self.components_to_restart = []
+ self.runnable = False
+ self.__uid = setuid
+ self.__gid = setgid
+ self.username = username
+ self.verbose = verbose
+ self.nokill = nokill
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.clear_config = clear_config
+ self.cmdctl_port = cmdctl_port
+ self.wait_time = wait_time
+ self.msgq_timeout = 5
+
+ # _run_under_unittests is only meant to be used when testing. It
+ # bypasses execution of some code to help with testing.
+ self._run_under_unittests = False
+
+ self._component_configurator = isc.bind10.component.Configurator(self,
+ isc.bind10.special_component.get_specials())
+ # The priorities here make them start in the correct order. First
+ # the socket creator (which would drop root privileges by then),
+ # then message queue and after that the config manager (which uses
+ # the config manager)
+ self.__core_components = {
+ 'sockcreator': {
+ 'kind': 'core',
+ 'special': 'sockcreator',
+ 'priority': 200
+ },
+ 'msgq': {
+ 'kind': 'core',
+ 'special': 'msgq',
+ 'priority': 199
+ },
+ 'cfgmgr': {
+ 'kind': 'core',
+ 'special': 'cfgmgr',
+ 'priority': 198
+ }
+ }
+ self.__started = False
+ self.exitcode = 0
+
+ # If -v was set, enable full debug logging.
+ if self.verbose:
+ logger.set_severity("DEBUG", 99)
+ # This is set in init_socket_srv
+ self._socket_path = None
+ self._socket_cache = None
+ self._tmpdir = None
+ self._srv_socket = None
+ self._unix_sockets = {}
+
+ def __propagate_component_config(self, config):
+ comps = dict(config)
+ # Fill in the core components, so they stay alive
+ for comp in self.__core_components:
+ if comp in comps:
+ raise Exception(comp + " is core component managed by " +
+ "b10-init, do not set it")
+ comps[comp] = self.__core_components[comp]
+ # Update the configuration
+ self._component_configurator.reconfigure(comps)
+
+ def change_user(self):
+ '''Change the user and group to those specified on construction.
+
+ This method is expected to be called by a component on initial
+ startup when the system is ready to switch the user and group
+ (i.e., once all components that need the privilege of the original
+ user have started).
+ '''
+ try:
+ if self.__gid is not None:
+ logger.info(BIND10_SETGID, self.__gid)
+ posix.setgid(self.__gid)
+ except Exception as ex:
+ raise ChangeUserError('failed to change group: ' + str(ex))
+
+ try:
+ if self.__uid is not None:
+ posix.setuid(self.__uid)
+ # We use one-shot logger after setuid here. This will
+ # detect any permission issue regarding logging due to the
+ # result of setuid at the earliest opportunity.
+ isc.log.Logger("b10-init").info(BIND10_SETUID, self.__uid)
+ except Exception as ex:
+ raise ChangeUserError('failed to change user: ' + str(ex))
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ try:
+ if 'components' in new_config:
+ self.__propagate_component_config(new_config['components'])
+ return isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def get_processes(self):
+ pids = list(self.components.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.components[pid].name(),
+ self.components[pid].address()])
+ return process_list
+
+ def _get_stats_data(self):
+ return { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(
+ 0, self._get_stats_data())
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ elif command == "get_socket":
+ answer = self._get_socket(args)
+ elif command == "drop_socket":
+ if "token" not in args:
+ answer = isc.config.ccsession. \
+ create_answer(1, "Missing token parameter")
+ else:
+ try:
+ self._socket_cache.drop_socket(args["token"])
+ answer = isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ answer = isc.config.ccsession.create_answer(1, str(e))
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def kill_started_components(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+ self.__kill_children(True)
+ self.components = {}
+
+ def _read_bind10_config(self):
+ """
+ Reads the parameters associated with the Init module itself.
+
+ This means the list of components we should start now.
+
+ This could easily be combined into start_all_processes, but
+ it stays because of historical reasons and because the tests
+ replace the method sometimes.
+ """
+ logger.info(BIND10_READING_INIT_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.__propagate_component_config(config_data['components'])
+
+ def log_starting(self, process, port = None, address = None):
+ """
+ A convenience function to output a "Starting xxx" message if the
+ logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
+ Putting this into a separate method ensures
+ that the output form is consistent across all processes.
+
+ The process name (passed as the first argument) is put into
+ self.curproc, and is used to indicate which process failed to
+ start if there is an error (and is used in the "Started" message
+ on success). The optional port and address information are
+ appended to the message (if present).
+ """
+ self.curproc = process
+ if port is None and address is None:
+ logger.info(BIND10_STARTING_PROCESS, self.curproc)
+ elif address is None:
+ logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+ port)
+ else:
+ logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+ self.curproc, address, port)
+
+ def log_started(self, pid = None):
+ """
+ A convenience function to output a 'Started xxxx (PID yyyy)'
+ message. As with log_starting(), this ensures a consistent
+ format.
+ """
+ if pid is None:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+ else:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ def process_running(self, msg, who):
+ """
+ Some processes return a message to the Init after they have
+ started to indicate that they are running. The form of the
+ message is a dictionary with contents {"running": "<process>"}.
+ This method checks the passed message and returns True if the
+ "who" process is contained in the message (so is presumably
+ running). It returns False for all other conditions and will
+ log an error if appropriate.
+ """
+ if msg is not None:
+ try:
+ if msg["running"] == who:
+ return True
+ else:
+ logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
+ except:
+ logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
+
+ return False
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
+ def _make_process_info(self, name, args, env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ """
+ Wrapper around ProcessInfo(), useful to override
+ ProcessInfo() creation during testing.
+ """
+ return ProcessInfo(name, args, env, dev_null_stdout, dev_null_stderr)
+
+ def start_msgq(self):
+ """
+ Start the message queue and connect to the command channel.
+ """
+ self.log_starting("b10-msgq")
+ msgq_proc = self._make_process_info("b10-msgq", ["b10-msgq"],
+ self.c_channel_env,
+ True, not self.verbose)
+ msgq_proc.spawn()
+ self.log_started(msgq_proc.pid)
+
+ # Now connect to the c-channel
+ cc_connect_start = time.time()
+ while self.cc_session is None:
+ # if we are run under unittests, break
+ if self._run_under_unittests:
+ break
+
+ # if we have been trying for "a while" give up
+ if (time.time() - cc_connect_start) > self.msgq_timeout:
+ if msgq_proc.process:
+ msgq_proc.process.kill()
+ logger.error(BIND10_CONNECTING_TO_CC_FAIL)
+ raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
+
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ except isc.cc.session.SessionError:
+ time.sleep(0.1)
+
+ # Subscribe to the message queue. The only messages we expect to receive
+ # on this channel are ones relating to process startup.
+ if self.cc_session is not None:
+ self.cc_session.group_subscribe("Init")
+
+ return msgq_proc
+
+ def start_cfgmgr(self):
+ """
+ Starts the configuration manager process
+ """
+ self.log_starting("b10-cfgmgr")
+ args = ["b10-cfgmgr"]
+ if self.data_path is not None:
+ args.append("--data-path=" + self.data_path)
+ if self.config_filename is not None:
+ args.append("--config-filename=" + self.config_filename)
+ if self.clear_config:
+ args.append("--clear-config")
+ bind_cfgd = self._make_process_info("b10-cfgmgr", args,
+ self.c_channel_env)
+ bind_cfgd.spawn()
+ self.log_started(bind_cfgd.pid)
+
+ # Wait for the configuration manager to start up as
+ # subsequent initialization cannot proceed without it. The
+ # time to wait can be set on the command line.
+ time_remaining = self.wait_time
+ msg, env = self.cc_session.group_recvmsg()
+ while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
+ logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
+ time.sleep(1)
+ time_remaining = time_remaining - 1
+ msg, env = self.cc_session.group_recvmsg()
+
+ if not self.process_running(msg, "ConfigManager"):
+ raise ProcessStartError("Configuration manager process has not started")
+
+ return bind_cfgd
+
+ def start_ccsession(self, c_channel_env):
+ """
+ Start the CC Session
+
+ The argument c_channel_env is unused but is supplied to keep the
+ argument list the same for all start_xxx methods.
+
+ With regards to logging, note that as the CC session is not a
+ process, the log_starting/log_started methods are not used.
+ """
+ logger.info(BIND10_STARTING_CC)
+ self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler,
+ socket_file = self.msgq_socket_file)
+ self.ccs.start()
+ logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
+
+ # A couple of utility methods for starting processes...
+
+ def start_process(self, name, args, c_channel_env, port=None, address=None):
+ """
+ Given a set of command arguments, start the process and output
+ appropriate log messages. If the start is successful, the process
+ is added to the list of started processes.
+
+ The port and address arguments are for log messages only.
+ """
+ self.log_starting(name, port, address)
+ newproc = self._make_process_info(name, args, c_channel_env)
+ newproc.spawn()
+ self.log_started(newproc.pid)
+ return newproc
+
+ def register_process(self, pid, component):
+ """
+ Put another process into b10-init to watch over it. When the process
+ dies, the component.failed() is called with the exit code.
+
+ It is expected the info is a isc.bind10.component.BaseComponent
+ subclass (or anything having the same interface).
+ """
+ self.components[pid] = component
+
+ def start_simple(self, name):
+ """
+ Most of the BIND-10 processes are started with the command:
+
+ <process-name> [-v]
+
+ ... where -v is appended if verbose is enabled. This method
+ generates the arguments from the name and starts the process.
+
+ The port and address arguments are for log messages only.
+ """
+ # Set up the command arguments.
+ args = [name]
+ if self.verbose:
+ args += ['-v']
+
+ # ... and start the process
+ return self.start_process(name, args, self.c_channel_env)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
+ def start_auth(self):
+ """
+ Start the Authoritative server
+ """
+ authargs = ['b10-auth']
+ if self.verbose:
+ authargs += ['-v']
+
+ # ... and start
+ return self.start_process("b10-auth", authargs, self.c_channel_env)
+
+ def start_resolver(self):
+ """
+ Start the Resolver. At present, all these arguments and switches
+ are pure speculation. As with the auth daemon, they should be
+ read from the configuration database.
+ """
+ self.curproc = "b10-resolver"
+ # XXX: this must be read from the configuration manager in the future
+ resargs = ['b10-resolver']
+ if self.verbose:
+ resargs += ['-v']
+
+ # ... and start
+ return self.start_process("b10-resolver", resargs, self.c_channel_env)
+
+ def start_cmdctl(self):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
+ return self.start_process("b10-cmdctl", args, self.c_channel_env,
+ self.cmdctl_port)
+
+ def start_all_components(self):
+ """
+ Starts up all the components. Any exception generated during the
+ starting of the components is handled by the caller.
+ """
+ # Start the real core (sockcreator, msgq, cfgmgr)
+ self._component_configurator.startup(self.__core_components)
+
+ # Connect to the msgq. This is not a process, so it's not handled
+ # inside the configurator.
+ self.start_ccsession(self.c_channel_env)
+
+ # Extract the parameters associated with Init. This can only be
+ # done after the CC Session is started. Note that the logging
+ # configuration may override the "-v" switch set on the command line.
+ self._read_bind10_config()
+
+ # TODO: Return the dropping of privileges
+
+ def startup(self):
+ """
+ Start the Init instance.
+
+ Returns None if successful, otherwise an string describing the
+ problem.
+ """
+ # Try to connect to the c-channel daemon, to see if it is already
+ # running
+ c_channel_env = {}
+ if self.msgq_socket_file is not None:
+ c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
+ logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
+ return "b10-msgq already running, or socket file not cleaned , cannot start"
+ except isc.cc.session.SessionError:
+ # this is the case we want, where the msgq is not running
+ pass
+
+ # Start all components. If any one fails to start, kill all started
+ # components and exit with an error indication.
+ try:
+ self.c_channel_env = c_channel_env
+ self.start_all_components()
+ except ChangeUserError as e:
+ self.kill_started_components()
+ return str(e) + '; ' + NOTE_ON_LOCK_FILE.replace('\n', ' ')
+ except Exception as e:
+ self.kill_started_components()
+ return "Unable to start " + self.curproc + ": " + str(e)
+
+ # Started successfully
+ self.runnable = True
+ self.__started = True
+ return None
+
+ def stop_process(self, process, recipient, pid):
+ """
+ Stop the given process, friendly-like. The process is the name it has
+ (in logs, etc), the recipient is the address on msgq. The pid is the
+ pid of the process (if we have multiple processes of the same name,
+ it might want to choose if it is for this one).
+ """
+ logger.info(BIND10_STOP_PROCESS, process)
+ self.cc_session.group_sendmsg(isc.config.ccsession.
+ create_command('shutdown', {'pid': pid}),
+ recipient, recipient)
+
+ def component_shutdown(self, exitcode=0):
+ """
+ Stop the Init instance from a components' request. The exitcode
+ indicates the desired exit code.
+
+ If we did not start yet, it raises an exception, which is meant
+ to propagate through the component and configurator to the startup
+ routine and abort the startup immediately. If it is started up already,
+ we just mark it so we terminate soon.
+
+ It does set the exit code in both cases.
+ """
+ self.exitcode = exitcode
+ if not self.__started:
+ raise Exception("Component failed during startup");
+ else:
+ self.runnable = False
+
+ def shutdown(self):
+ """Stop the Init instance."""
+ logger.info(BIND10_SHUTDOWN)
+ # If ccsession is still there, inform rest of the system this module
+ # is stopping. Since everything will be stopped shortly, this is not
+ # really necessary, but this is done to reflect that b10-init is also
+ # 'just' a module.
+ self.ccs.send_stopping()
+
+ # try using the BIND 10 request to stop
+ try:
+ self._component_configurator.shutdown()
+ except:
+ pass
+ # XXX: some delay probably useful... how much is uncertain
+ # I have changed the delay from 0.5 to 1, but sometime it's
+ # still not enough.
+ time.sleep(1)
+ self.reap_children()
+
+ # Send TERM and KILL signals to modules if we're not prevented
+ # from doing so
+ if not self.nokill:
+ # next try sending a SIGTERM
+ self.__kill_children(False)
+ # finally, send SIGKILL (unmaskable termination) until everybody
+ # dies
+ while self.components:
+ # XXX: some delay probably useful... how much is uncertain
+ time.sleep(0.1)
+ self.reap_children()
+ self.__kill_children(True)
+ logger.info(BIND10_SHUTDOWN_COMPLETE)
+
+ def __kill_children(self, forceful):
+ '''Terminate remaining subprocesses by sending a signal.
+
+ The forceful parameter will be passed to Component.kill().
+ This is a dedicated subroutine of shutdown(), just to unify two
+ similar cases.
+
+ '''
+ logmsg = BIND10_SEND_SIGKILL if forceful else BIND10_SEND_SIGTERM
+ # We need to make a copy of values as the components may be modified
+ # in the loop.
+ for component in list(self.components.values()):
+ logger.info(logmsg, component.name(), component.pid())
+ try:
+ component.kill(forceful)
+ except OSError as ex:
+ # If kill() failed due to EPERM, it doesn't make sense to
+ # keep trying, so we just log the fact and forget that
+ # component. Ignore other OSErrors (usually ESRCH because
+ # the child finally exited)
+ signame = "SIGKILL" if forceful else "SIGTERM"
+ logger.info(BIND10_SEND_SIGNAL_FAIL, signame,
+ component.name(), component.pid(), ex)
+ if ex.errno == errno.EPERM:
+ del self.components[component.pid()]
+
+ def _get_process_exit_status(self):
+ return os.waitpid(-1, os.WNOHANG)
+
+ def reap_children(self):
+ """Check to see if any of our child processes have exited,
+ and note this for later handling.
+ """
+ while True:
+ try:
+ (pid, exit_status) = self._get_process_exit_status()
+ except OSError as o:
+ if o.errno == errno.ECHILD:
+ break
+ # XXX: should be impossible to get any other error here
+ raise
+ if pid == 0:
+ break
+ if pid in self.components:
+ # One of the components we know about. Get information on it.
+ component = self.components.pop(pid)
+ logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
+ exit_status)
+ if component.is_running() and self.runnable:
+ # Tell it it failed. But only if it matters (we are
+ # not shutting down and the component considers itself
+ # to be running).
+ component_restarted = component.failed(exit_status);
+ # if the process wants to be restarted, but not just yet,
+ # it returns False
+ if not component_restarted:
+ self.components_to_restart.append(component)
+ else:
+ logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+ def restart_processes(self):
+ """
+ Restart any dead processes:
+
+ * Returns the time when the next process is ready to be restarted.
+ * If the server is shutting down, returns 0.
+ * If there are no processes, returns None.
+
+ The values returned can be safely passed into select() as the
+ timeout value.
+
+ """
+ if not self.runnable:
+ return 0
+ still_dead = []
+ # keep track of the first time we need to check this queue again,
+ # if at all
+ next_restart_time = None
+ now = time.time()
+ for component in self.components_to_restart:
+ # If the component was removed from the configurator since it was
+ # scheduled to restart, just ignore it. The object will just be
+ # dropped here.
+ if not self._component_configurator.has_component(component):
+ logger.info(BIND10_RESTART_COMPONENT_SKIPPED, component.name())
+ elif not component.restart(now):
+ still_dead.append(component)
+ if next_restart_time is None or\
+ next_restart_time > component.get_restart_time():
+ next_restart_time = component.get_restart_time()
+ self.components_to_restart = still_dead
+
+ return next_restart_time
+
+ def _get_socket(self, args):
+ """
+ Implementation of the get_socket CC command. It asks the cache
+ to provide the token and sends the information back.
+ """
+ try:
+ try:
+ addr = isc.net.parse.addr_parse(args['address'])
+ port = isc.net.parse.port_parse(args['port'])
+ protocol = args['protocol']
+ if protocol not in ['UDP', 'TCP']:
+ raise ValueError("Protocol must be either UDP or TCP")
+ share_mode = args['share_mode']
+ if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+ raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+ " or NO")
+ share_name = args['share_name']
+ except KeyError as ke:
+ return \
+ isc.config.ccsession.create_answer(1,
+ "Missing parameter " +
+ str(ke))
+
+ # FIXME: This call contains blocking IPC. It is expected to be
+ # short, but if it turns out to be problem, we'll need to do
+ # something about it.
+ token = self._socket_cache.get_token(protocol, addr, port,
+ share_mode, share_name)
+ return isc.config.ccsession.create_answer(0, {
+ 'token': token,
+ 'path': self._socket_path
+ })
+ except isc.bind10.socket_cache.SocketError as e:
+ return isc.config.ccsession.create_answer(CREATOR_SOCKET_ERROR,
+ str(e))
+ except isc.bind10.socket_cache.ShareError as e:
+ return isc.config.ccsession.create_answer(CREATOR_SHARE_ERROR,
+ str(e))
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def socket_request_handler(self, token, unix_socket):
+ """
+ This function handles a token that comes over a unix_domain socket.
+ The function looks into the _socket_cache and sends the socket
+ identified by the token back over the unix_socket.
+ """
+ try:
+ token = str(token, 'ASCII') # Convert from bytes to str
+ fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+ # FIXME: These two calls are blocking in their nature. An OS-level
+ # buffer is likely to be large enough to hold all these data, but
+ # if it wasn't and the remote application got stuck, we would have
+ # a problem. If there appear such problems, we should do something
+ # about it.
+ unix_socket.sendall(CREATOR_SOCKET_OK)
+ libutil_io_python.send_fd(unix_socket.fileno(), fd)
+ except Exception as e:
+ logger.info(BIND10_NO_SOCKET, token, e)
+ unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+ def socket_consumer_dead(self, unix_socket):
+ """
+ This function handles when a unix_socket closes. This means all
+ sockets sent to it are to be considered closed. This function signals
+ so to the _socket_cache.
+ """
+ logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+ try:
+ self._socket_cache.drop_application(unix_socket.fileno())
+ except ValueError:
+ # This means the application holds no sockets. It's harmless, as it
+ # can happen in real life - for example, it requests a socket, but
+ # get_socket doesn't find it, so the application dies. It should be
+ # rare, though.
+ pass
+
+ def set_creator(self, creator):
+ """
+ Registers a socket creator into the b10-init. The socket creator is not
+ used directly, but through a cache. The cache is created in this
+ method.
+
+ If called more than once, it raises a ValueError.
+ """
+ if self._socket_cache is not None:
+ raise ValueError("A creator was inserted previously")
+ self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+ def init_socket_srv(self):
+ """
+ Creates and listens on a unix-domain socket to be able to send out
+ the sockets.
+
+ This method should be called after switching user, or the switched
+ applications won't be able to access the socket.
+ """
+ self._srv_socket = socket.socket(socket.AF_UNIX)
+ # We create a temporary directory somewhere safe and unique, to avoid
+ # the need to find the place ourself or bother users. Also, this
+ # secures the socket on some platforms, as it creates a private
+ # directory.
+ self._tmpdir = tempfile.mkdtemp(prefix='sockcreator-')
+ # Get the name
+ self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+ # And bind the socket to the name
+ self._srv_socket.bind(self._socket_path)
+ self._srv_socket.listen(5)
+
+ def remove_socket_srv(self):
+ """
+ Closes and removes the listening socket and the directory where it
+ lives, as we created both.
+
+ It does nothing if the _srv_socket is not set (eg. it was not yet
+ initialized).
+ """
+ if self._srv_socket is not None:
+ self._srv_socket.close()
+ if os.path.exists(self._socket_path):
+ os.remove(self._socket_path)
+ if os.path.isdir(self._tmpdir):
+ os.rmdir(self._tmpdir)
+
+ def _srv_accept(self):
+ """
+ Accept a socket from the unix domain socket server and put it to the
+ others we care about.
+ """
+ (socket, conn) = self._srv_socket.accept()
+ self._unix_sockets[socket.fileno()] = (socket, b'')
+
+ def _socket_data(self, socket_fileno):
+ """
+ This is called when a socket identified by the socket_fileno needs
+ attention. We try to read data from there. If it is closed, we remove
+ it.
+ """
+ (sock, previous) = self._unix_sockets[socket_fileno]
+ while True:
+ try:
+ data = sock.recv(1, socket.MSG_DONTWAIT)
+ except socket.error as se:
+ # These two might be different on some systems
+ if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+ # No more data now. Oh, well, just store what we have.
+ self._unix_sockets[socket_fileno] = (sock, previous)
+ return
+ else:
+ data = b'' # Pretend it got closed
+ if len(data) == 0: # The socket got to its end
+ del self._unix_sockets[socket_fileno]
+ self.socket_consumer_dead(sock)
+ sock.close()
+ return
+ else:
+ if data == b"\n":
+ # Handle this token and clear it
+ self.socket_request_handler(previous, sock)
+ previous = b''
+ else:
+ previous += data
+
+ def run(self, wakeup_fd):
+ """
+ The main loop, waiting for sockets, commands and dead processes.
+ Runs as long as the runnable is true.
+
+ The wakeup_fd descriptor is the read end of pipe where CHLD signal
+ handler writes.
+ """
+ ccs_fd = self.ccs.get_socket().fileno()
+ while self.runnable:
+ # clean up any processes that exited
+ self.reap_children()
+ next_restart = self.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = \
+ select.select([wakeup_fd, ccs_fd,
+ self._srv_socket.fileno()] +
+ list(self._unix_sockets.keys()), [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ self.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ self.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+ elif fd == self._srv_socket.fileno():
+ self._srv_accept()
+ elif fd in self._unix_sockets:
+ self._socket_data(fd)
+
+# global variables, needed for signal handlers
+options = None
+b10_init = None
+
+def reaper(signal_number, stack_frame):
+ """A child process has died (SIGCHLD received)."""
+ # don't do anything...
+ # the Python signal handler has been set up to write
+ # down a pipe, waking up our select() bit
+ pass
+
+def get_signame(signal_number):
+ """Return the symbolic name for a signal."""
+ for sig in dir(signal):
+ if sig.startswith("SIG") and sig[3].isalnum():
+ if getattr(signal, sig) == signal_number:
+ return sig
+ return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
+def fatal_signal(signal_number, stack_frame):
+ """We need to exit (SIGINT or SIGTERM received)."""
+ global options
+ global b10_init
+ logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ b10_init.runnable = False
+
+def process_rename(option, opt_str, value, parser):
+ """Function that renames the process if it is requested by a option."""
+ isc.util.process.rename(value)
+
+def parse_args(args=sys.argv[1:], Parser=OptionParser):
+ """
+ Function for parsing command line arguments. Returns the
+ options object from OptionParser.
+ """
+ parser = Parser(version=VERSION)
+ parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
+ type="string", default=None,
+ help="UNIX domain socket file the b10-msgq daemon will use")
+ parser.add_option("-i", "--no-kill", action="store_true", dest="nokill",
+ default=False, help="do not send SIGTERM and SIGKILL signals to modules during shutdown")
+ parser.add_option("-u", "--user", dest="user", type="string", default=None,
+ help="Change user after startup (must run as root)")
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
+ parser.add_option("--pretty-name", type="string", action="callback",
+ callback=process_rename,
+ help="Set the process name (displayed in ps, top, ...)")
+ parser.add_option("-c", "--config-file", action="store",
+ dest="config_file", default=None,
+ help="Configuration database filename")
+ parser.add_option("--clear-config", action="store_true",
+ dest="clear_config", default=False,
+ help="Create backup of the configuration file and " +
+ "start with a clean configuration")
+ parser.add_option("-p", "--data-path", dest="data_path",
+ help="Directory to search for configuration files",
+ default=None)
+ parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
+ default=None, help="Port of command control")
+ parser.add_option("--pid-file", dest="pid_file", type="string",
+ default=None,
+ help="file to dump the PID of the BIND 10 process")
+ parser.add_option("-w", "--wait", dest="wait_time", type="int",
+ default=10, help="Time (in seconds) to wait for config manager to start up")
+
+ (options, args) = parser.parse_args(args)
+
+ if options.cmdctl_port is not None:
+ try:
+ isc.net.parse.port_parse(options.cmdctl_port)
+ except ValueError as e:
+ parser.error(e)
+
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ return options
+
+def dump_pid(pid_file):
+ """
+ Dump the PID of the current process to the specified file. If the given
+ file is None this function does nothing. If the file already exists,
+ the existing content will be removed. If a system error happens in
+ creating or writing to the file, the corresponding exception will be
+ propagated to the caller.
+ """
+ if pid_file is None:
+ return
+ f = open(pid_file, "w")
+ f.write('%d\n' % os.getpid())
+ f.close()
+
+def unlink_pid_file(pid_file):
+ """
+ Remove the given file, which is basically expected to be the PID file
+ created by dump_pid(). The specified file may or may not exist; if it
+ doesn't this function does nothing. Other system level errors in removing
+ the file will be propagated as the corresponding exception.
+ """
+ if pid_file is None:
+ return
+ try:
+ os.unlink(pid_file)
+ except OSError as error:
+ if error.errno is not errno.ENOENT:
+ raise
+
+def remove_lock_files():
+ """
+ Remove various lock files which were created by code such as in the
+ logger. This function should be called after BIND 10 shutdown.
+ """
+
+ lockfiles = ["logger_lockfile"]
+
+ lpath = bind10_config.DATA_PATH
+ if "B10_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_FROM_BUILD"]
+ if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+ lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
+ if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"]
+
+ for f in lockfiles:
+ fname = lpath + '/' + f
+ if os.path.isfile(fname):
+ try:
+ os.unlink(fname)
+ except OSError as e:
+ # We catch and ignore permission related error on unlink.
+ # This can happen if bind10 started with -u, created a lock
+ # file as a privileged user, but the directory is not writable
+ # for the changed user. This setup will cause immediate
+ # start failure, and we leave verbose error message including
+ # the leftover lock file, so it should be acceptable to ignore
+ # it (note that it doesn't make sense to log this event at
+ # this point)
+ if e.errno != errno.EPERM and e.errno != errno.EACCES:
+ raise
+
+ return
+
+def main():
+ global options
+ global b10_init
+ # Enforce line buffering on stdout, even when not a TTY
+ sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
+
+ options = parse_args()
+
+ # Announce startup. Making this is the first log message.
+ try:
+ logger.info(BIND10_STARTING, VERSION)
+ except RuntimeError as e:
+ sys.stderr.write('ERROR: failed to write the initial log: %s\n' %
+ str(e))
+ sys.stderr.write(NOTE_ON_LOCK_FILE)
+ sys.exit(1)
+
+ # Check user ID.
+ setuid = None
+ setgid = None
+ username = None
+ if options.user:
+ # Try getting information about the user, assuming UID passed.
+ try:
+ pw_ent = pwd.getpwuid(int(options.user))
+ setuid = pw_ent.pw_uid
+ setgid = pw_ent.pw_gid
+ username = pw_ent.pw_name
+ except ValueError:
+ pass
+ except KeyError:
+ pass
+
+ # Next try getting information about the user, assuming user name
+ # passed.
+ # If the information is both a valid user name and user number, we
+ # prefer the name because we try it second. A minor point, hopefully.
+ try:
+ pw_ent = pwd.getpwnam(options.user)
+ setuid = pw_ent.pw_uid
+ setgid = pw_ent.pw_gid
+ username = pw_ent.pw_name
+ except KeyError:
+ pass
+
+ if setuid is None:
+ logger.fatal(BIND10_INVALID_USER, options.user)
+ sys.exit(1)
+
+ # Create wakeup pipe for signal handlers
+ wakeup_pipe = os.pipe()
+ signal.set_wakeup_fd(wakeup_pipe[1])
+
+ # Set signal handlers for catching child termination, as well
+ # as our own demise.
+ signal.signal(signal.SIGCHLD, reaper)
+ signal.siginterrupt(signal.SIGCHLD, False)
+ signal.signal(signal.SIGINT, fatal_signal)
+ signal.signal(signal.SIGTERM, fatal_signal)
+
+ # Block SIGPIPE, as we don't want it to end this process
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+
+ try:
+ b10_init = Init(options.msgq_socket_file, options.data_path,
+ options.config_file, options.clear_config,
+ options.verbose, options.nokill,
+ setuid, setgid, username, options.cmdctl_port,
+ options.wait_time)
+ startup_result = b10_init.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ b10_init.init_socket_srv()
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # Let it run
+ b10_init.run(wakeup_pipe[0])
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ b10_init.shutdown()
+ finally:
+ # Clean up the filesystem
+ unlink_pid_file(options.pid_file)
+ remove_lock_files()
+ if b10_init is not None:
+ b10_init.remove_socket_srv()
+ sys.exit(b10_init.exitcode)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/bin/bind10/init.spec b/src/bin/bind10/init.spec
new file mode 100644
index 0000000..62c6f09
--- /dev/null
+++ b/src/bin/bind10/init.spec
@@ -0,0 +1,92 @@
+{
+ "module_spec": {
+ "module_name": "Init",
+ "module_description": "Init process",
+ "config_data": [
+ {
+ "item_name": "components",
+ "item_type": "named_set",
+ "item_optional": false,
+ "item_default": {
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ },
+ "named_set_item_spec": {
+ "item_name": "component",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": { },
+ "map_item_spec": [
+ {
+ "item_name": "special",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "process",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "kind",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": "dispensable"
+ },
+ {
+ "item_name": "address",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "params",
+ "item_optional": true,
+ "item_type": "list",
+ "list_item_spec": {
+ "item_name": "param",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": ""
+ }
+ },
+ {
+ "item_name": "priority",
+ "item_optional": true,
+ "item_type": "integer"
+ }
+ ]
+ }
+ }
+ ],
+ "commands": [
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down BIND 10",
+ "command_args": []
+ },
+ {
+ "command_name": "ping",
+ "command_description": "Ping the b10-init process",
+ "command_args": []
+ },
+ {
+ "command_name": "show_processes",
+ "command_description": "List the running BIND 10 processes",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
+
diff --git a/src/bin/bind10/init_messages.mes b/src/bin/bind10/init_messages.mes
new file mode 100644
index 0000000..9cdb7ef
--- /dev/null
+++ b/src/bin/bind10/init_messages.mes
@@ -0,0 +1,327 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
+The b10-init process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+
+% BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3
+The process terminated, but b10-init didn't expect it to, which means
+it must have failed.
+
+% BIND10_COMPONENT_RESTART component %1 is about to restart
+The named component failed previously and we will try to restart it to provide
+as flawless a service as possible, but it should be investigated what happened,
+as it could happen again.
+
+% BIND10_COMPONENT_START component %1 is starting
+The named component is about to be started by the b10-init process.
+
+% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+
+% BIND10_COMPONENT_STOP component %1 is being stopped
+A component is about to be asked to stop willingly by the b10-init.
+
+% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+
+% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
+A debug message. This indicates that the configurator is building a plan
+for how to change the configuration from the older one to the newer one.
+This does no real work yet; it just plans what needs to be done.
+
+% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+
+% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the b10-init.
+
+% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+
+% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
+The part that cares about starting and stopping the right component from
+the b10-init process is starting up. This happens only once at the startup
+of the b10-init process. It will start the basic set of processes now (the
+ones b10-init needs to read the configuration), the rest will be started
+after the configuration is known.
+
+% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
+The part that cares about starting and stopping processes in the b10-init is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate on their own; if they fail to comply, other parts of
+the b10-init process will try to force them).
+
+% BIND10_CONFIGURATOR_TASK performing task %1 on %2
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+
+% BIND10_CONNECTING_TO_CC_FAIL failed to connect to configuration/command channel; try -v to see output from msgq
+The b10-init process tried to connect to the communication channel for
+commands and configuration updates during initialization, but it
+failed. This is a fatal startup error, and process will soon
+terminate after some cleanup. There can be several reasons for the
+failure, but the most likely cause is that the msgq daemon failed to
+start, and the most likely cause of the msgq failure is that it
+doesn't have a permission to create a socket file for the
+communication. To confirm that, you can see debug messages from msgq
+by starting BIND 10 with the -v command line option. If it indicates
+permission problem for msgq, make sure the directory where the socket
+file is to be created is writable for the msgq process. Note that if
+you specify the -u option to change process users, the directory must
+be writable for that user.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the b10-init module specified
+statistics data which is invalid for the b10-init specification file.
+
+% BIND10_INVALID_USER invalid user: %1
+The b10-init process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The b10-init module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and bind10 process can release them as well, unless the
+same sockets are used by yet another application.
+
+% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+
+% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned, most common reason is that the request
+is invalid and may not come from bind10 process at all.
+
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
+
+% BIND10_READING_INIT_CONFIGURATION reading b10-init configuration
+The b10-init process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+
+% BIND10_RECEIVED_COMMAND received command: %1
+The b10-init module received a command and shall now process it. The command
+is printed.
+
+% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
+The b10-init module received a configuration update and is going to apply
+it now. The new configuration is printed.
+
+% BIND10_RECEIVED_SIGNAL received signal %1
+The b10-init module received the given signal.
+
+% BIND10_RESTART_COMPONENT_SKIPPED Skipped restarting a component %1
+The b10-init module tried to restart a component after it failed (crashed)
+unexpectedly, but the b10-init then found that the component had been removed
+from its local configuration of components to run. This is an unusual
+situation but can happen if the administrator removes the component from
+the configuration after the component's crash and before the restart time.
+The b10-init module simply skipped restarting that module, and the whole system
+went back to the expected state (except that the crash itself is likely
+to be a bug).
+
+% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
+The given process has been restarted successfully, and is now running
+with the given process id.
+
+% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
+The given process has ended unexpectedly, and is now restarted.
+
+% BIND10_SELECT_ERROR error in select() call: %1
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+
+% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
+The b10-init module is sending a SIGKILL signal to the given process.
+
+% BIND10_SEND_SIGNAL_FAIL sending %1 to %2 (PID %3) failed: %4
+The b10-init module sent a signal (either SIGTERM or SIGKILL) to a process,
+but it failed due to some system level error. There are two major cases:
+the target process has already terminated but the b10-init module had sent
+the signal before it noticed the termination. In this case an error
+message should indicate something like "no such process". This can be
+safely ignored. The other case is that the b10-init module doesn't have
+the privilege to send a signal to the process. It can typically
+happen when the b10-init module started as a privileged process, spawned a
+subprocess, and then dropped the privilege. It includes the case for
+the socket creator when the b10-init process runs with the -u command line
+option. In this case, the b10-init module simply gives up trying to terminate
+the process explicitly because it's unlikely to succeed by keeping
+sending the signal. Although the socket creator is implemented so
+that it will terminate automatically when the b10-init process exits
+(and that should be the case for any other future process running with
+a higher privilege), it's recommended to check if there's any
+remaining BIND 10 process if this message is logged. For all other
+cases, the b10-init module will keep sending the signal until it confirms
+all child processes terminate. Although unlikely, this could prevent
+the b10-init module from exiting, just keeping sending the signals. So,
+again, it's advisable to check if it really terminates when this
+message is logged.
+
+% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
+The b10-init module is sending a SIGTERM signal to the given process.
+
+% BIND10_SETGID setting GID to %1
+The b10-init switches the process group ID to the given value. This happens
+when BIND 10 starts with the -u option, and the group ID will be set to
+that of the specified user.
+
+% BIND10_SETUID setting UID to %1
+The b10-init switches the user it runs as to the given UID.
+
+% BIND10_SHUTDOWN stopping the server
+The b10-init process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+
+% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
+All child processes have been stopped, and the b10-init process will now
+stop itself.
+
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The b10-init requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The b10-init module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+sigkill. This should not happen usually.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The b10-init module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The b10-init forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_CC started configuration/command session
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+
+% BIND10_STARTED_PROCESS started %1
+The given process has successfully been started.
+
+% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
+The given process has successfully been started, and has the given PID.
+
+% BIND10_STARTING starting BIND10: %1
+Informational message on startup that shows the full version.
+
+% BIND10_STARTING_CC starting configuration/command session
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+
+% BIND10_STARTING_PROCESS starting process %1
+The b10-init module is starting the given process.
+
+% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
+The b10-init module is starting the given process, which will listen on the
+given port number.
+
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+The b10-init module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+
+% BIND10_STARTUP_COMPLETE BIND 10 started
+All modules have been successfully started, and BIND 10 is now running.
+
+% BIND10_STARTUP_ERROR error during startup: %1
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+
+% BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Init process and the processes it starts. This error is output when a
+message received by the Init process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+
+% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Init process and the processes it starts. This error is output when a
+message received by the Init process is not recognised.
+
+% BIND10_STOP_PROCESS asking %1 to shut down
+The b10-init module is sending a shutdown command to the given module over
+the message channel.
+
+% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the b10-init process.
+
+% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Init module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Init
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 17d2c53..8121eba 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -45,5 +45,5 @@ export B10_FROM_BUILD
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
-exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
+exec ${BIND10_PATH}/b10-init "$@"
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index a5e3fab..6d59dbd 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = bind10_test.py
+PYTESTS = init_test.py
noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/bin/bind10/tests/args_test.py b/src/bin/bind10/tests/args_test.py
index 93a7cea..2447a62 100644
--- a/src/bin/bind10/tests/args_test.py
+++ b/src/bin/bind10/tests/args_test.py
@@ -1,5 +1,5 @@
"""
-This program tests the boss process to make sure that it runs while
+This program tests the b10-init process to make sure that it runs while
dropping permissions. It must be run as a user that can set permission.
"""
import unittest
@@ -17,69 +17,69 @@ SUID_USER="shane"
BIND10_EXE="../run_bind10.sh"
TIMEOUT=3
-class TestBossArgs(unittest.TestCase):
- def _waitForString(self, bob, s):
+class TestInitArgs(unittest.TestCase):
+ def _waitForString(self, init, s):
found_string = False
start_time = time.time()
while time.time() < start_time + TIMEOUT:
- (r,w,x) = select.select((bob.stdout,), (), (), TIMEOUT)
- if bob.stdout in r:
- s = bob.stdout.readline()
+ (r,w,x) = select.select((init.stdout,), (), (), TIMEOUT)
+ if init.stdout in r:
+ s = init.stdout.readline()
if s == '':
break
- if s.startswith(s):
+ if s.startswith(s):
found_string = True
break
return found_string
def testNoArgs(self):
"""Run bind10 without any arguments"""
- bob = subprocess.Popen(args=(BIND10_EXE,),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ init = subprocess.Popen(args=(BIND10_EXE,),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
time.sleep(0.1)
- bob.terminate()
- bob.wait()
+ init.terminate()
+ init.wait()
self.assertTrue(started_ok)
def testBadOption(self):
"""Run bind10 with a bogus option"""
- bob = subprocess.Popen(args=(BIND10_EXE, "--badoption"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, 'bind10: error: no such option: --badoption')
+ init = subprocess.Popen(args=(BIND10_EXE, "--badoption"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, 'bind10: error: no such option: --badoption')
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 2)
+ init.terminate()
+ self.assertTrue(init.wait() == 2)
self.assertTrue(failed)
def testArgument(self):
"""Run bind10 with an argument (this is not allowed)"""
- bob = subprocess.Popen(args=(BIND10_EXE, "argument"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, 'Usage: bind10 [options]')
+ init = subprocess.Popen(args=(BIND10_EXE, "argument"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, 'Usage: bind10 [options]')
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testBadUser(self):
"""Run bind10 with a bogus user"""
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "bogus_user"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "bind10: invalid user: 'bogus_user'")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "bogus_user"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "bind10: invalid user: 'bogus_user'")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testBadUid(self):
"""Run bind10 with a bogus user ID"""
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "999999999"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "bind10: invalid user: '999999999'")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "999999999"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "bind10: invalid user: '999999999'")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testFailSetUser(self):
@@ -90,12 +90,12 @@ class TestBossArgs(unittest.TestCase):
if os.getuid() == 0:
self.skipTest("test must not be run as root (uid is 0)")
# XXX: we depend on the "nobody" user
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "nobody"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "[bind10] Error on startup: Unable to start b10-msgq; Unable to change to user nobody")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "nobody"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "[bind10] Error on startup: Unable to start b10-msgq; Unable to change to user nobody")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testSetUser(self):
@@ -108,9 +108,9 @@ class TestBossArgs(unittest.TestCase):
if os.geteuid() != 0:
self.skipTest("test must run as root (euid is not 0)")
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", SUID_USER),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", SUID_USER),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
self.assertTrue(started_ok)
ps = subprocess.Popen(args=("ps", "axo", "user,pid"),
stdout=subprocess.PIPE)
@@ -120,22 +120,22 @@ class TestBossArgs(unittest.TestCase):
s = ps.stdout.readline()
if s == '': break
(user, pid) = s.split()
- if int(pid) == bob.pid:
+ if int(pid) == init.pid:
ps_user = user.decode()
break
self.assertTrue(ps_user is not None)
self.assertTrue(ps_user == SUID_USER)
time.sleep(0.1)
- bob.terminate()
- x = bob.wait()
- self.assertTrue(bob.wait() == 0)
+ init.terminate()
+ x = init.wait()
+ self.assertTrue(init.wait() == 0)
def testPrettyName(self):
"""Try the --pretty-name option."""
- CMD_PRETTY_NAME = b'bob-name-test'
- bob = subprocess.Popen(args=(BIND10_EXE, '--pretty-name',
+ CMD_PRETTY_NAME = b'init-name-test'
+ init = subprocess.Popen(args=(BIND10_EXE, '--pretty-name',
CMD_PRETTY_NAME), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
self.assertTrue(started_ok)
ps = subprocess.Popen(args=("ps", "axo", "pid,comm"),
stdout=subprocess.PIPE)
@@ -145,13 +145,13 @@ class TestBossArgs(unittest.TestCase):
s = ps.stdout.readline()
if s == '': break
(pid,comm) = s.split(None, 1)
- if int(pid) == bob.pid:
+ if int(pid) == init.pid:
command = comm
break
self.assertEqual(command, CMD_PRETTY_NAME + b'\n')
time.sleep(0.1)
- bob.terminate()
- bob.wait()
+ init.terminate()
+ init.wait()
if __name__ == '__main__':
unittest.main()
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
deleted file mode 100644
index ccfa831..0000000
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ /dev/null
@@ -1,2422 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
-# we want to be explicit about what we do, like when hijacking a library
-# call used by the bind10_src.
-from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
-import bind10_src
-
-# XXX: environment tests are currently disabled, due to the preprocessor
-# setup that we have now complicating the environment
-
-import unittest
-import sys
-import os
-import os.path
-import copy
-import signal
-import socket
-from isc.net.addr import IPAddr
-import time
-import isc
-import isc.log
-import isc.bind10.socket_cache
-import errno
-import random
-
-from isc.testutils.parse_args import TestOptParser, OptsError
-from isc.testutils.ccsession_mock import MockModuleCCSession
-
-class TestProcessInfo(unittest.TestCase):
- def setUp(self):
- # redirect stdout to a pipe so we can check that our
- # process spawning is doing the right thing with stdout
- self.old_stdout = os.dup(sys.stdout.fileno())
- self.pipes = os.pipe()
- os.dup2(self.pipes[1], sys.stdout.fileno())
- os.close(self.pipes[1])
- # note that we use dup2() to restore the original stdout
- # to the main program ASAP in each test... this prevents
- # hangs reading from the child process (as the pipe is only
- # open in the child), and also insures nice pretty output
-
- def tearDown(self):
- # clean up our stdout munging
- os.dup2(self.old_stdout, sys.stdout.fileno())
- os.close(self.pipes[0])
-
- def test_init(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- pi.spawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
-
-# def test_setting_env(self):
-# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
-# os.dup2(self.old_stdout, sys.stdout.fileno())
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
-# 'FOO': 'BAR' })
-
- def test_setting_null_stdout(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
- dev_null_stdout=True)
- pi.spawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.dev_null_stdout, True)
- self.assertEqual(os.read(self.pipes[0], 100), b"")
-
- def test_respawn(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- pi.spawn()
- # wait for old process to work...
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- # respawn it
- old_pid = pi.pid
- pi.respawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- # make sure the new one started properly
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
- self.assertNotEqual(pi.pid, old_pid)
-
-class TestCacheCommands(unittest.TestCase):
- """
- Test methods of boss related to the socket cache and socket handling.
- """
- def setUp(self):
- """
- Prepare the boss for some tests.
-
- Also prepare some variables we need.
- """
- self.__boss = BoB()
- # Fake the cache here so we can pretend it is us and hijack the
- # calls to its methods.
- self.__boss._socket_cache = self
- self.__boss._socket_path = '/socket/path'
- self.__raise_exception = None
- self.__socket_args = {
- "port": 53,
- "address": "::",
- "protocol": "UDP",
- "share_mode": "ANY",
- "share_name": "app"
- }
- # What was and wasn't called.
- self.__drop_app_called = None
- self.__get_socket_called = None
- self.__send_fd_called = None
- self.__get_token_called = None
- self.__drop_socket_called = None
- bind10_src.libutil_io_python.send_fd = self.__send_fd
-
- def __send_fd(self, to, socket):
- """
- A function to hook the send_fd in the bind10_src.
- """
- self.__send_fd_called = (to, socket)
-
- class FalseSocket:
- """
- A socket where we can fake methods we need instead of having a real
- socket.
- """
- def __init__(self):
- self.send = b""
- def fileno(self):
- """
- The file number. Used for identifying the remote application.
- """
- return 42
-
- def sendall(self, data):
- """
- Adds data to the self.send.
- """
- self.send += data
-
- def drop_application(self, application):
- """
- Part of pretending to be the cache. Logs the parameter to
- self.__drop_app_called.
-
- In the case self.__raise_exception is set, the exception there
- is raised instead.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__drop_app_called = application
-
- def test_consumer_dead(self):
- """
- Test that it calls the drop_application method of the cache.
- """
- self.__boss.socket_consumer_dead(self.FalseSocket())
- self.assertEqual(42, self.__drop_app_called)
-
- def test_consumer_dead_invalid(self):
- """
- Test that it doesn't crash in case the application is not known to
- the cache, the boss doesn't crash, as this actually can happen in
- practice.
- """
- self.__raise_exception = ValueError("This application is unknown")
- # This doesn't crash
- self.__boss.socket_consumer_dead(self.FalseSocket())
-
- def get_socket(self, token, application):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the call is logged
- into __get_socket_called and a number is returned.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__get_socket_called = (token, application)
- return 13
-
- def test_request_handler(self):
- """
- Test that a request for socket is forwarded and the socket is sent
- back, if it returns a socket.
- """
- socket = self.FalseSocket()
- # An exception from the cache
- self.__raise_exception = ValueError("Test value error")
- self.__boss.socket_request_handler(b"token", socket)
- # It was called, but it threw, so it is not noted here
- self.assertIsNone(self.__get_socket_called)
- self.assertEqual(b"0\n", socket.send)
- # It should not have sent any socket.
- self.assertIsNone(self.__send_fd_called)
- # Now prepare a valid scenario
- self.__raise_exception = None
- socket.send = b""
- self.__boss.socket_request_handler(b"token", socket)
- self.assertEqual(b"1\n", socket.send)
- self.assertEqual((42, 13), self.__send_fd_called)
- self.assertEqual(("token", 42), self.__get_socket_called)
-
- def get_token(self, protocol, address, port, share_mode, share_name):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the parameters are
- logged into __get_token_called and a token is returned.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__get_token_called = (protocol, address, port, share_mode,
- share_name)
- return "token"
-
- def test_get_socket_ok(self):
- """
- Test the successful scenario of getting a socket.
- """
- result = self.__boss._get_socket(self.__socket_args)
- [code, answer] = result['result']
- self.assertEqual(0, code)
- self.assertEqual({
- 'token': 'token',
- 'path': '/socket/path'
- }, answer)
- addr = self.__get_token_called[1]
- self.assertTrue(isinstance(addr, IPAddr))
- self.assertEqual("::", str(addr))
- self.assertEqual(("UDP", addr, 53, "ANY", "app"),
- self.__get_token_called)
-
- def test_get_socket_error(self):
- """
- Test that bad inputs are handled correctly, etc.
- """
- def check_code(code, args):
- """
- Pass the args there and check if it returns success or not.
-
- The rest is not tested, as it is already checked in the
- test_get_socket_ok.
- """
- [rcode, ranswer] = self.__boss._get_socket(args)['result']
- self.assertEqual(code, rcode)
- if code != 0:
- # This should be an error message. The exact formatting
- # is unknown, but we check it is string at least
- self.assertTrue(isinstance(ranswer, str))
-
- def mod_args(name, value):
- """
- Override a parameter in the args.
- """
- result = dict(self.__socket_args)
- result[name] = value
- return result
-
- # Port too large
- check_code(1, mod_args('port', 65536))
- # Not numeric address
- check_code(1, mod_args('address', 'example.org.'))
- # Some bad values of enum-like params
- check_code(1, mod_args('protocol', 'BAD PROTO'))
- check_code(1, mod_args('share_mode', 'BAD SHARE'))
- # Check missing parameters
- for param in self.__socket_args.keys():
- args = dict(self.__socket_args)
- del args[param]
- check_code(1, args)
- # These are OK values for the enum-like parameters
- # The ones from test_get_socket_ok are not tested here
- check_code(0, mod_args('protocol', 'TCP'))
- check_code(0, mod_args('share_mode', 'SAMEAPP'))
- check_code(0, mod_args('share_mode', 'NO'))
- # If an exception is raised from within the cache, it is converted
- # to an error, not propagated
- self.__raise_exception = Exception("Test exception")
- check_code(1, self.__socket_args)
- # The special "expected" exceptions
- self.__raise_exception = \
- isc.bind10.socket_cache.ShareError("Not shared")
- check_code(3, self.__socket_args)
- self.__raise_exception = \
- isc.bind10.socket_cache.SocketError("Not shared", 13)
- check_code(2, self.__socket_args)
-
- def drop_socket(self, token):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the parameter is stored
- in __drop_socket_called.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__drop_socket_called = token
-
- def test_drop_socket(self):
- """
- Check the drop_socket command. It should directly call the method
- on the cache. Exceptions should be translated to error messages.
- """
- # This should be OK and just propagated to the call.
- self.assertEqual({"result": [0]},
- self.__boss.command_handler("drop_socket",
- {"token": "token"}))
- self.assertEqual("token", self.__drop_socket_called)
- self.__drop_socket_called = None
- # Missing parameter
- self.assertEqual({"result": [1, "Missing token parameter"]},
- self.__boss.command_handler("drop_socket", {}))
- self.assertIsNone(self.__drop_socket_called)
- # An exception is raised from within the cache
- self.__raise_exception = ValueError("Test error")
- self.assertEqual({"result": [1, "Test error"]},
- self.__boss.command_handler("drop_socket",
- {"token": "token"}))
-
-
-class TestBoB(unittest.TestCase):
- def setUp(self):
- # Save original values that may be tweaked in some tests
- self.__orig_setgid = bind10_src.posix.setgid
- self.__orig_setuid = bind10_src.posix.setuid
- self.__orig_logger_class = isc.log.Logger
-
- def tearDown(self):
- # Restore original values saved in setUp()
- bind10_src.posix.setgid = self.__orig_setgid
- bind10_src.posix.setuid = self.__orig_setuid
- isc.log.Logger = self.__orig_logger_class
-
- def test_init(self):
- bob = BoB()
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, None)
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.components, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.username, None)
- self.assertIsNone(bob._socket_cache)
-
- def __setgid(self, gid):
- self.__gid_set = gid
-
- def __setuid(self, uid):
- self.__uid_set = uid
-
- def test_change_user(self):
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = self.__setuid
-
- self.__gid_set = None
- self.__uid_set = None
- bob = BoB()
- bob.change_user()
- # No gid/uid set in boss, nothing called.
- self.assertIsNone(self.__gid_set)
- self.assertIsNone(self.__uid_set)
-
- BoB(setuid=42, setgid=4200).change_user()
- # This time, it gets called
- self.assertEqual(4200, self.__gid_set)
- self.assertEqual(42, self.__uid_set)
-
- def raising_set_xid(gid_or_uid):
- ex = OSError()
- ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
- raise ex
-
- # Let setgid raise an exception
- bind10_src.posix.setgid = raising_set_xid
- bind10_src.posix.setuid = self.__setuid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- # Let setuid raise an exception
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = raising_set_xid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- # Let initial log output after setuid raise an exception
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = self.__setuid
- isc.log.Logger = raising_set_xid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- def test_set_creator(self):
- """
- Test the call to set_creator. First time, the cache is created
- with the passed creator. The next time, it throws an exception.
- """
- bob = BoB()
- # The cache doesn't use it at start, so just create an empty class
- class Creator: pass
- creator = Creator()
- bob.set_creator(creator)
- self.assertTrue(isinstance(bob._socket_cache,
- isc.bind10.socket_cache.Cache))
- self.assertEqual(creator, bob._socket_cache._creator)
- self.assertRaises(ValueError, bob.set_creator, creator)
-
- def test_socket_srv(self):
- """Tests init_socket_srv() and remove_socket_srv() work as expected."""
- bob = BoB()
-
- self.assertIsNone(bob._srv_socket)
- self.assertIsNone(bob._tmpdir)
- self.assertIsNone(bob._socket_path)
-
- bob.init_socket_srv()
-
- self.assertIsNotNone(bob._srv_socket)
- self.assertNotEqual(-1, bob._srv_socket.fileno())
- self.assertEqual(os.path.join(bob._tmpdir, 'sockcreator'),
- bob._srv_socket.getsockname())
-
- self.assertIsNotNone(bob._tmpdir)
- self.assertTrue(os.path.isdir(bob._tmpdir))
- self.assertIsNotNone(bob._socket_path)
- self.assertTrue(os.path.exists(bob._socket_path))
-
- # Check that it's possible to connect to the socket file (this
- # only works if the socket file exists and the server listens on
- # it).
- s = socket.socket(socket.AF_UNIX)
- try:
- s.connect(bob._socket_path)
- can_connect = True
- s.close()
- except socket.error as e:
- can_connect = False
-
- self.assertTrue(can_connect)
-
- bob.remove_socket_srv()
-
- self.assertEqual(-1, bob._srv_socket.fileno())
- self.assertFalse(os.path.exists(bob._socket_path))
- self.assertFalse(os.path.isdir(bob._tmpdir))
-
- # These should not fail either:
-
- # second call
- bob.remove_socket_srv()
-
- bob._srv_socket = None
- bob.remove_socket_srv()
-
- def test_init_alternate_socket(self):
- bob = BoB("alt_socket_file")
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.components, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.username, None)
-
- def test_command_handler(self):
- class DummySession():
- def group_sendmsg(self, msg, group):
- (self.msg, self.group) = (msg, group)
- def group_recvmsg(self, nonblock, seq): pass
- class DummyModuleCCSession():
- module_spec = isc.config.module_spec.ModuleSpec({
- "module_name": "Boss",
- "statistics": [
- {
- "item_name": "boot_time",
- "item_type": "string",
- "item_optional": False,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Boot time",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- }
- ]
- })
- def get_module_spec(self):
- return self.module_spec
- bob = BoB()
- bob.verbose = True
- bob.cc_session = DummySession()
- bob.ccs = DummyModuleCCSession()
- # a bad command
- self.assertEqual(bob.command_handler(-1, None),
- isc.config.ccsession.create_answer(1, "bad command"))
- # "shutdown" command
- self.assertEqual(bob.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(0))
- self.assertFalse(bob.runnable)
- # "getstats" command
- self.assertEqual(bob.command_handler("getstats", None),
- isc.config.ccsession.create_answer(0,
- { 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME) }))
- # "ping" command
- self.assertEqual(bob.command_handler("ping", None),
- isc.config.ccsession.create_answer(0, "pong"))
- # "show_processes" command
- self.assertEqual(bob.command_handler("show_processes", None),
- isc.config.ccsession.create_answer(0,
- bob.get_processes()))
- # an unknown command
- self.assertEqual(bob.command_handler("__UNKNOWN__", None),
- isc.config.ccsession.create_answer(1, "Unknown command"))
-
- # Fake the get_token of cache and test the command works
- bob._socket_path = '/socket/path'
- class cache:
- def get_token(self, protocol, addr, port, share_mode, share_name):
- return str(addr) + ':' + str(port)
- bob._socket_cache = cache()
- args = {
- "port": 53,
- "address": "0.0.0.0",
- "protocol": "UDP",
- "share_mode": "ANY",
- "share_name": "app"
- }
- # at all and this is the easiest way to check.
- self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
- 'path': '/socket/path'}]},
- bob.command_handler("get_socket", args))
- # The drop_socket is not tested here, but in TestCacheCommands.
- # It needs the cache mocks to be in place and they are there.
-
- def test_stop_process(self):
- """
- Test checking the stop_process method sends the right message over
- the message bus.
- """
- class DummySession():
- def group_sendmsg(self, msg, group, instance="*"):
- (self.msg, self.group, self.instance) = (msg, group, instance)
- bob = BoB()
- bob.cc_session = DummySession()
- bob.stop_process('process', 'address', 42)
- self.assertEqual('address', bob.cc_session.group)
- self.assertEqual('address', bob.cc_session.instance)
- self.assertEqual({'command': ['shutdown', {'pid': 42}]},
- bob.cc_session.msg)
-
-# Mock class for testing BoB's usage of ProcessInfo
-class MockProcessInfo:
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.process = None
- self.pid = None
-
- def spawn(self):
- # set some pid (only used for testing that it is not None anymore)
- self.pid = 42147
-
-# Class for testing the BoB without actually starting processes.
-# This is used for testing the start/stop components routines and
-# the BoB commands.
-#
-# Testing that external processes start is outside the scope
-# of the unit test, by overriding the process start methods we can check
-# that the right processes are started depending on the configuration
-# options.
-class MockBob(BoB):
- def __init__(self):
- BoB.__init__(self)
-
- # Set flags as to which of the overridden methods has been run.
- self.msgq = False
- self.cfgmgr = False
- self.ccsession = False
- self.auth = False
- self.resolver = False
- self.xfrout = False
- self.xfrin = False
- self.zonemgr = False
- self.stats = False
- self.stats_httpd = False
- self.cmdctl = False
- self.dhcp6 = False
- self.dhcp4 = False
- self.c_channel_env = {}
- self.components = { }
- self.creator = False
- self.get_process_exit_status_called = False
-
- class MockSockCreator(isc.bind10.component.Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- isc.bind10.component.Component.__init__(self, process, boss,
- kind, 'SockCreator')
- self._start_func = boss.start_creator
-
- specials = isc.bind10.special_component.get_specials()
- specials['sockcreator'] = MockSockCreator
- self._component_configurator = \
- isc.bind10.component.Configurator(self, specials)
-
- def start_creator(self):
- self.creator = True
- procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
- procinfo.pid = 1
- return procinfo
-
- def _read_bind10_config(self):
- # Configuration options are set directly
- pass
-
- def start_msgq(self):
- self.msgq = True
- procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
- procinfo.pid = 2
- return procinfo
-
- def start_ccsession(self, c_channel_env):
- # this is not a process, don't have to do anything with procinfo
- self.ccsession = True
-
- def start_cfgmgr(self):
- self.cfgmgr = True
- procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
- procinfo.pid = 3
- return procinfo
-
- def start_auth(self):
- self.auth = True
- procinfo = ProcessInfo('b10-auth', ['/bin/false'])
- procinfo.pid = 5
- return procinfo
-
- def start_resolver(self):
- self.resolver = True
- procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
- procinfo.pid = 6
- return procinfo
-
- def start_simple(self, name):
- procmap = { 'b10-zonemgr': self.start_zonemgr,
- 'b10-stats': self.start_stats,
- 'b10-stats-httpd': self.start_stats_httpd,
- 'b10-cmdctl': self.start_cmdctl,
- 'b10-dhcp6': self.start_dhcp6,
- 'b10-dhcp4': self.start_dhcp4,
- 'b10-xfrin': self.start_xfrin,
- 'b10-xfrout': self.start_xfrout }
- return procmap[name]()
-
- def start_xfrout(self):
- self.xfrout = True
- procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
- procinfo.pid = 7
- return procinfo
-
- def start_xfrin(self):
- self.xfrin = True
- procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
- procinfo.pid = 8
- return procinfo
-
- def start_zonemgr(self):
- self.zonemgr = True
- procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
- procinfo.pid = 9
- return procinfo
-
- def start_stats(self):
- self.stats = True
- procinfo = ProcessInfo('b10-stats', ['/bin/false'])
- procinfo.pid = 10
- return procinfo
-
- def start_stats_httpd(self):
- self.stats_httpd = True
- procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
- procinfo.pid = 11
- return procinfo
-
- def start_cmdctl(self):
- self.cmdctl = True
- procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
- procinfo.pid = 12
- return procinfo
-
- def start_dhcp6(self):
- self.dhcp6 = True
- procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
- procinfo.pid = 13
- return procinfo
-
- def start_dhcp4(self):
- self.dhcp4 = True
- procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
- procinfo.pid = 14
- return procinfo
-
- def stop_process(self, process, recipient, pid):
- procmap = { 'b10-auth': self.stop_auth,
- 'b10-resolver': self.stop_resolver,
- 'b10-xfrout': self.stop_xfrout,
- 'b10-xfrin': self.stop_xfrin,
- 'b10-zonemgr': self.stop_zonemgr,
- 'b10-stats': self.stop_stats,
- 'b10-stats-httpd': self.stop_stats_httpd,
- 'b10-cmdctl': self.stop_cmdctl }
- procmap[process]()
-
- # Some functions to pretend we stop processes, use by stop_process
- def stop_msgq(self):
- if self.msgq:
- del self.components[2]
- self.msgq = False
-
- def stop_cfgmgr(self):
- if self.cfgmgr:
- del self.components[3]
- self.cfgmgr = False
-
- def stop_auth(self):
- if self.auth:
- del self.components[5]
- self.auth = False
-
- def stop_resolver(self):
- if self.resolver:
- del self.components[6]
- self.resolver = False
-
- def stop_xfrout(self):
- if self.xfrout:
- del self.components[7]
- self.xfrout = False
-
- def stop_xfrin(self):
- if self.xfrin:
- del self.components[8]
- self.xfrin = False
-
- def stop_zonemgr(self):
- if self.zonemgr:
- del self.components[9]
- self.zonemgr = False
-
- def stop_stats(self):
- if self.stats:
- del self.components[10]
- self.stats = False
-
- def stop_stats_httpd(self):
- if self.stats_httpd:
- del self.components[11]
- self.stats_httpd = False
-
- def stop_cmdctl(self):
- if self.cmdctl:
- del self.components[12]
- self.cmdctl = False
-
- def _get_process_exit_status(self):
- if self.get_process_exit_status_called:
- return (0, 0)
- self.get_process_exit_status_called = True
- return (53, 0)
-
- def _get_process_exit_status_unknown_pid(self):
- if self.get_process_exit_status_called:
- return (0, 0)
- self.get_process_exit_status_called = True
- return (42, 0)
-
- def _get_process_exit_status_raises_oserror_echild(self):
- raise OSError(errno.ECHILD, 'Mock error')
-
- def _get_process_exit_status_raises_oserror_other(self):
- raise OSError(0, 'Mock error')
-
- def _get_process_exit_status_raises_other(self):
- raise Exception('Mock error')
-
- def _make_mock_process_info(self, name, args, c_channel_env,
- dev_null_stdout=False, dev_null_stderr=False):
- return MockProcessInfo(name, args, c_channel_env,
- dev_null_stdout, dev_null_stderr)
-
-class MockBobSimple(BoB):
- def __init__(self):
- BoB.__init__(self)
- # Set which process has been started
- self.started_process_name = None
- self.started_process_args = None
- self.started_process_env = None
-
- def _make_mock_process_info(self, name, args, c_channel_env,
- dev_null_stdout=False, dev_null_stderr=False):
- return MockProcessInfo(name, args, c_channel_env,
- dev_null_stdout, dev_null_stderr)
-
- def start_process(self, name, args, c_channel_env, port=None,
- address=None):
- self.started_process_name = name
- self.started_process_args = args
- self.started_process_env = c_channel_env
- return None
-
-class TestStartStopProcessesBob(unittest.TestCase):
- """
- Check that the start_all_components method starts the right combination
- of components and that the right components are started and stopped
- according to changes in configuration.
- """
- def check_environment_unchanged(self):
- # Check whether the environment has not been changed
- self.assertEqual(original_os_environ, os.environ)
-
- def check_started(self, bob, core, auth, resolver):
- """
- Check that the right sets of services are started. The ones that
- should be running are specified by the core, auth and resolver parameters
- (they are groups of processes, eg. auth means b10-auth, -xfrout, -xfrin
- and -zonemgr).
- """
- self.assertEqual(bob.msgq, core)
- self.assertEqual(bob.cfgmgr, core)
- self.assertEqual(bob.ccsession, core)
- self.assertEqual(bob.creator, core)
- self.assertEqual(bob.auth, auth)
- self.assertEqual(bob.resolver, resolver)
- self.assertEqual(bob.xfrout, auth)
- self.assertEqual(bob.xfrin, auth)
- self.assertEqual(bob.zonemgr, auth)
- self.assertEqual(bob.stats, core)
- self.assertEqual(bob.stats_httpd, core)
- self.assertEqual(bob.cmdctl, core)
- self.check_environment_unchanged()
-
- def check_preconditions(self, bob):
- self.check_started(bob, False, False, False)
-
- def check_started_none(self, bob):
- """
- Check that the situation is according to configuration where no servers
- should be started. Some components still need to be running.
- """
- self.check_started(bob, True, False, False)
- self.check_environment_unchanged()
-
- def check_started_both(self, bob):
- """
- Check the situation is according to configuration where both servers
- (auth and resolver) are enabled.
- """
- self.check_started(bob, True, True, True)
- self.check_environment_unchanged()
-
- def check_started_auth(self, bob):
- """
- Check the set of components needed to run auth only is started.
- """
- self.check_started(bob, True, True, False)
- self.check_environment_unchanged()
-
- def check_started_resolver(self, bob):
- """
- Check the set of components needed to run resolver only is started.
- """
- self.check_started(bob, True, False, True)
- self.check_environment_unchanged()
-
- def check_started_dhcp(self, bob, v4, v6):
- """
- Check if proper combinations of DHCPv4 and DHCPv6 can be started
- """
- self.assertEqual(v4, bob.dhcp4)
- self.assertEqual(v6, bob.dhcp6)
- self.check_environment_unchanged()
-
- def construct_config(self, start_auth, start_resolver):
- # The things that are common, not turned on and off
- config = {}
- config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
- config['b10-stats-httpd'] = { 'kind': 'dispensable',
- 'address': 'StatsHttpd' }
- config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
- if start_auth:
- config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
- config['b10-xfrout'] = { 'kind': 'dispensable',
- 'address': 'Xfrout' }
- config['b10-xfrin'] = { 'kind': 'dispensable',
- 'address': 'Xfrin' }
- config['b10-zonemgr'] = { 'kind': 'dispensable',
- 'address': 'Zonemgr' }
- if start_resolver:
- config['b10-resolver'] = { 'kind': 'needed',
- 'special': 'resolver' }
- return {'components': config}
-
- def config_start_init(self, start_auth, start_resolver):
- """
- Test the configuration is loaded at the startup.
- """
- bob = MockBob()
- config = self.construct_config(start_auth, start_resolver)
- class CC:
- def get_full_config(self):
- return config
- # Provide the fake CC with data
- bob.ccs = CC()
- # And make sure it's not overwritten
- def start_ccsession():
- bob.ccsession = True
- bob.start_ccsession = lambda _: start_ccsession()
- # We need to return the original _read_bind10_config
- bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
- bob.start_all_components()
- self.check_started(bob, True, start_auth, start_resolver)
- self.check_environment_unchanged()
-
- def test_start_none(self):
- self.config_start_init(False, False)
-
- def test_start_resolver(self):
- self.config_start_init(False, True)
-
- def test_start_auth(self):
- self.config_start_init(True, False)
-
- def test_start_both(self):
- self.config_start_init(True, True)
-
- def test_config_start(self):
- """
- Test that the configuration starts and stops components according
- to configuration changes.
- """
-
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
- bob.runnable = True
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Enable both at once
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_both(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- # Turn them both off again
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_none(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Start and stop auth separately
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Start and stop resolver separately
- bob.config_handler(self.construct_config(False, True))
- self.check_started_resolver(bob)
-
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Alternate
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- bob.config_handler(self.construct_config(False, True))
- self.check_started_resolver(bob)
-
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- def test_config_start_once(self):
- """
- Tests that a component is started only once.
- """
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
-
- bob.runnable = True
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- # Send again we want to start them. Should not do it, as they are.
- bob.config_handler(self.construct_config(True, True))
-
- def test_config_not_started_early(self):
- """
- Test that components are not started by the config handler before
- startup.
- """
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- bob.config_handler({'start_auth': True, 'start_resolver': True})
-
- # Checks that DHCP (v4 and v6) components are started when expected
- def test_start_dhcp(self):
-
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
- bob.config_handler(self.construct_config(False, False))
- self.check_started_dhcp(bob, False, False)
-
- def test_start_dhcp_v6only(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
- # v6 only enabled
- bob.start_all_components()
- bob.runnable = True
- bob._BoB_started = True
- config = self.construct_config(False, False)
- config['components']['b10-dhcp6'] = { 'kind': 'needed',
- 'address': 'Dhcp6' }
- bob.config_handler(config)
- self.check_started_dhcp(bob, False, True)
-
- # uncomment when dhcpv4 becomes implemented
- # v4 only enabled
- #bob.cfg_start_dhcp6 = False
- #bob.cfg_start_dhcp4 = True
- #self.check_started_dhcp(bob, True, False)
-
- # both v4 and v6 enabled
- #bob.cfg_start_dhcp6 = True
- #bob.cfg_start_dhcp4 = True
- #self.check_started_dhcp(bob, True, True)
-
-class MockComponent:
- def __init__(self, name, pid, address=None):
- self.name = lambda: name
- self.pid = lambda: pid
- self.address = lambda: address
- self.restarted = False
- self.forceful = False
- self.running = True
- self.has_failed = False
-
- def get_restart_time(self):
- return 0 # arbitrary dummy value
-
- def restart(self, now):
- self.restarted = True
- return True
-
- def is_running(self):
- return self.running
-
- def failed(self, status):
- return self.has_failed
-
- def kill(self, forceful):
- self.forceful = forceful
-
-class TestBossCmd(unittest.TestCase):
- def test_ping(self):
- """
- Confirm simple ping command works.
- """
- bob = MockBob()
- answer = bob.command_handler("ping", None)
- self.assertEqual(answer, {'result': [0, 'pong']})
-
- def test_show_processes_empty(self):
- """
- Confirm getting a list of processes works.
- """
- bob = MockBob()
- answer = bob.command_handler("show_processes", None)
- self.assertEqual(answer, {'result': [0, []]})
-
- def test_show_processes(self):
- """
- Confirm getting a list of processes works.
- """
- bob = MockBob()
- bob.register_process(1, MockComponent('first', 1))
- bob.register_process(2, MockComponent('second', 2, 'Second'))
- answer = bob.command_handler("show_processes", None)
- processes = [[1, 'first', None],
- [2, 'second', 'Second']]
- self.assertEqual(answer, {'result': [0, processes]})
-
-class TestParseArgs(unittest.TestCase):
- """
- This tests parsing of arguments of the bind10 master process.
- """
- #TODO: Write tests for the original parsing, bad options, etc.
- def test_no_opts(self):
- """
- Test correct default values when no options are passed.
- """
- options = parse_args([], TestOptParser)
- self.assertEqual(None, options.data_path)
- self.assertEqual(None, options.config_file)
- self.assertEqual(None, options.cmdctl_port)
-
- def test_data_path(self):
- """
- Test it can parse the data path.
- """
- self.assertRaises(OptsError, parse_args, ['-p'], TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--data-path'],
- TestOptParser)
- options = parse_args(['-p', '/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
- options = parse_args(['--data-path=/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
-
- def test_config_filename(self):
- """
- Test it can parse the config switch.
- """
- self.assertRaises(OptsError, parse_args, ['-c'], TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--config-file'],
- TestOptParser)
- options = parse_args(['-c', 'config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
- options = parse_args(['--config-file=config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
-
- def test_clear_config(self):
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.clear_config)
- options = parse_args(['--clear-config'], TestOptParser)
- self.assertEqual(True, options.clear_config)
-
- def test_nokill(self):
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.nokill)
- options = parse_args(['--no-kill'], TestOptParser)
- self.assertEqual(True, options.nokill)
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.nokill)
- options = parse_args(['-i'], TestOptParser)
- self.assertEqual(True, options.nokill)
-
- def test_cmdctl_port(self):
- """
- Test it can parse the command control port.
- """
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
- TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
- TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port'],
- TestOptParser)
- options = parse_args(['--cmdctl-port=1234'], TestOptParser)
- self.assertEqual(1234, options.cmdctl_port)
-
-class TestPIDFile(unittest.TestCase):
- def setUp(self):
- self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
- if os.path.exists(self.pid_file):
- os.unlink(self.pid_file)
-
- def tearDown(self):
- if os.path.exists(self.pid_file):
- os.unlink(self.pid_file)
-
- def check_pid_file(self):
- # dump PID to the file, and confirm the content is correct
- dump_pid(self.pid_file)
- my_pid = os.getpid()
- with open(self.pid_file, "r") as f:
- self.assertEqual(my_pid, int(f.read()))
-
- def test_dump_pid(self):
- self.check_pid_file()
-
- # make sure any existing content will be removed
- with open(self.pid_file, "w") as f:
- f.write('dummy data\n')
- self.check_pid_file()
-
- def test_unlink_pid_file_notexist(self):
- dummy_data = 'dummy_data\n'
-
- with open(self.pid_file, "w") as f:
- f.write(dummy_data)
-
- unlink_pid_file("no_such_pid_file")
-
- # the file specified for unlink_pid_file doesn't exist,
- # and the original content of the file should be intact.
- with open(self.pid_file, "r") as f:
- self.assertEqual(dummy_data, f.read())
-
- def test_dump_pid_with_none(self):
- # Check the behavior of dump_pid() and unlink_pid_file() with None.
- # This should be no-op.
- dump_pid(None)
- self.assertFalse(os.path.exists(self.pid_file))
-
- dummy_data = 'dummy_data\n'
-
- with open(self.pid_file, "w") as f:
- f.write(dummy_data)
-
- unlink_pid_file(None)
-
- with open(self.pid_file, "r") as f:
- self.assertEqual(dummy_data, f.read())
-
- def test_dump_pid_failure(self):
- # the attempt to open file will fail, which should result in exception.
- self.assertRaises(IOError, dump_pid,
- 'nonexistent_dir' + os.sep + 'bind10.pid')
-
-class TestBossComponents(unittest.TestCase):
- """
- Test the boss propagates component configuration properly to the
- component configurator and acts sane.
- """
- def setUp(self):
- self.__param = None
- self.__called = False
- self.__compconfig = {
- 'comp': {
- 'kind': 'needed',
- 'process': 'cat'
- }
- }
- self._tmp_time = None
- self._tmp_sleep = None
- self._tmp_module_cc_session = None
- self._tmp_cc_session = None
-
- def tearDown(self):
- if self._tmp_time is not None:
- time.time = self._tmp_time
- if self._tmp_sleep is not None:
- time.sleep = self._tmp_sleep
- if self._tmp_module_cc_session is not None:
- isc.config.ModuleCCSession = self._tmp_module_cc_session
- if self._tmp_cc_session is not None:
- isc.cc.Session = self._tmp_cc_session
-
- def __unary_hook(self, param):
- """
- A hook function that stores the parameter for later examination.
- """
- self.__param = param
-
- def __nullary_hook(self):
- """
- A hook function that notes down it was called.
- """
- self.__called = True
-
- def __check_core(self, config):
- """
- A function checking that the config contains parts for the valid
- core component configuration.
- """
- self.assertIsNotNone(config)
- for component in ['sockcreator', 'msgq', 'cfgmgr']:
- self.assertTrue(component in config)
- self.assertEqual(component, config[component]['special'])
- self.assertEqual('core', config[component]['kind'])
-
- def __check_extended(self, config):
- """
- This checks that the config contains the core and one more component.
- """
- self.__check_core(config)
- self.assertTrue('comp' in config)
- self.assertEqual('cat', config['comp']['process'])
- self.assertEqual('needed', config['comp']['kind'])
- self.assertEqual(4, len(config))
-
- def test_correct_run(self):
- """
- Test the situation when we run in usual scenario, nothing fails,
- we just start, reconfigure and then stop peacefully.
- """
- bob = MockBob()
- # Start it
- orig = bob._component_configurator.startup
- bob._component_configurator.startup = self.__unary_hook
- bob.start_all_components()
- bob._component_configurator.startup = orig
- self.__check_core(self.__param)
- self.assertEqual(3, len(self.__param))
-
- # Reconfigure it
- self.__param = None
- orig = bob._component_configurator.reconfigure
- bob._component_configurator.reconfigure = self.__unary_hook
- # Otherwise it does not work
- bob.runnable = True
- bob.config_handler({'components': self.__compconfig})
- self.__check_extended(self.__param)
- currconfig = self.__param
- # If we reconfigure it, but it does not contain the components part,
- # nothing is called
- bob.config_handler({})
- self.assertEqual(self.__param, currconfig)
- self.__param = None
- bob._component_configurator.reconfigure = orig
- # Check a configuration that messes up the core components is rejected.
- compconf = dict(self.__compconfig)
- compconf['msgq'] = { 'process': 'echo' }
- result = bob.config_handler({'components': compconf})
- # Check it rejected it
- self.assertEqual(1, result['result'][0])
-
- # We can't call shutdown, that one relies on the stuff in main
- # We check somewhere else that the shutdown is actually called
- # from there (the test_kills).
-
- def __real_test_kill(self, nokill=False, ex_on_kill=None):
- """
- Helper function that does the actual kill functionality testing.
- """
- bob = MockBob()
- bob.nokill = nokill
-
- killed = []
- class ImmortalComponent:
- """
- An immortal component. It does not stop when it is told so
- (anyway it is not told so). It does not die if it is killed
- the first time. It dies only when killed forcefully.
- """
- def __init__(self):
- # number of kill() calls, preventing infinite loop.
- self.__call_count = 0
-
- def kill(self, forceful=False):
- self.__call_count += 1
- if self.__call_count > 2:
- raise Exception('Too many calls to ImmortalComponent.kill')
-
- killed.append(forceful)
- if ex_on_kill is not None:
- # If exception is given by the test, raise it here.
- # In the case of ESRCH, the process should have gone
- # somehow, so we clear the components.
- if ex_on_kill.errno == errno.ESRCH:
- bob.components = {}
- raise ex_on_kill
- if forceful:
- bob.components = {}
- def pid(self):
- return 1
- def name(self):
- return "Immortal"
- bob.components = {}
- bob.register_process(1, ImmortalComponent())
-
- # While at it, we check the configurator shutdown is actually called
- orig = bob._component_configurator.shutdown
- bob._component_configurator.shutdown = self.__nullary_hook
- self.__called = False
-
- bob.ccs = MockModuleCCSession()
- self.assertFalse(bob.ccs.stopped)
-
- bob.shutdown()
-
- self.assertTrue(bob.ccs.stopped)
-
- # Here, killed is an array where False is added if SIGTERM
- # should be sent, or True if SIGKILL should be sent, in order in
- # which they're sent.
- if nokill:
- self.assertEqual([], killed)
- else:
- if ex_on_kill is not None:
- self.assertEqual([False], killed)
- else:
- self.assertEqual([False, True], killed)
-
- self.assertTrue(self.__called)
-
- bob._component_configurator.shutdown = orig
-
- def test_kills(self):
- """
- Test that the boss kills components which don't want to stop.
- """
- self.__real_test_kill()
-
- def test_kill_fail(self):
- """Test cases where kill() results in an exception due to OS error.
-
- The behavior should be different for EPERM, so we test two cases.
-
- """
-
- ex = OSError()
- ex.errno, ex.strerror = errno.ESRCH, 'No such process'
- self.__real_test_kill(ex_on_kill=ex)
-
- ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
- self.__real_test_kill(ex_on_kill=ex)
-
- def test_nokill(self):
- """
- Test that the boss *doesn't* kill components which don't want to
- stop, when asked not to (by passing the --no-kill option which
- sets bob.nokill to True).
- """
- self.__real_test_kill(True)
-
- def test_component_shutdown(self):
- """
- Test the component_shutdown sets all variables accordingly.
- """
- bob = MockBob()
- self.assertRaises(Exception, bob.component_shutdown, 1)
- self.assertEqual(1, bob.exitcode)
- bob._BoB__started = True
- bob.component_shutdown(2)
- self.assertEqual(2, bob.exitcode)
- self.assertFalse(bob.runnable)
-
- def test_init_config(self):
- """
- Test initial configuration is loaded.
- """
- bob = MockBob()
- # Start it
- bob._component_configurator.reconfigure = self.__unary_hook
- # We need to return the original read_bind10_config
- bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
- # And provide a session to read the data from
- class CC:
- pass
- bob.ccs = CC()
- bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
- bob.start_all_components()
- self.__check_extended(self.__param)
-
- def __setup_restart(self, bob, component):
- '''Common procedure for restarting a component used below.'''
- bob.components_to_restart = { component }
- component.restarted = False
- bob.restart_processes()
-
- def test_restart_processes(self):
- '''Check some behavior on restarting processes.'''
- bob = MockBob()
- bob.runnable = True
- component = MockComponent('test', 53)
-
- # A component to be restarted will actually be restarted iff it's
- # in the configurator's configuration.
- # We bruteforce the configurator internal below; ugly, but the easiest
- # way for the test.
- bob._component_configurator._components['test'] = (None, component)
- self.__setup_restart(bob, component)
- self.assertTrue(component.restarted)
- self.assertNotIn(component, bob.components_to_restart)
-
- # Remove the component from the configuration. It won't be restarted
- # even if scheduled, nor will remain in the to-be-restarted list.
- del bob._component_configurator._components['test']
- self.__setup_restart(bob, component)
- self.assertFalse(component.restarted)
- self.assertNotIn(component, bob.components_to_restart)
-
- def test_get_processes(self):
- '''Test that procsses are returned correctly, sorted by pid.'''
- bob = MockBob()
-
- pids = list(range(0, 20))
- random.shuffle(pids)
-
- for i in range(0, 20):
- pid = pids[i]
- component = MockComponent('test' + str(pid), pid,
- 'Test' + str(pid))
- bob.components[pid] = component
-
- process_list = bob.get_processes()
- self.assertEqual(20, len(process_list))
-
- last_pid = -1
- for process in process_list:
- pid = process[0]
- self.assertLessEqual(last_pid, pid)
- last_pid = pid
- self.assertEqual([pid, 'test' + str(pid), 'Test' + str(pid)],
- process)
-
- def _test_reap_children_helper(self, runnable, is_running, failed):
- '''Construct a BoB instance, set various data in it according to
- passed args and check if the component was added to the list of
- components to restart.'''
- bob = MockBob()
- bob.runnable = runnable
-
- component = MockComponent('test', 53)
- component.running = is_running
- component.has_failed = failed
- bob.components[53] = component
-
- self.assertNotIn(component, bob.components_to_restart)
-
- bob.reap_children()
-
- if runnable and is_running and not failed:
- self.assertIn(component, bob.components_to_restart)
- else:
- self.assertEqual([], bob.components_to_restart)
-
- def test_reap_children(self):
- '''Test that children are queued to be restarted when they ask for it.'''
- # test various combinations of 3 booleans
- # (BoB.runnable, component.is_running(), component.failed())
- self._test_reap_children_helper(False, False, False)
- self._test_reap_children_helper(False, False, True)
- self._test_reap_children_helper(False, True, False)
- self._test_reap_children_helper(False, True, True)
- self._test_reap_children_helper(True, False, False)
- self._test_reap_children_helper(True, False, True)
- self._test_reap_children_helper(True, True, False)
- self._test_reap_children_helper(True, True, True)
-
- # setup for more tests below
- bob = MockBob()
- bob.runnable = True
- component = MockComponent('test', 53)
- bob.components[53] = component
-
- # case where the returned pid is unknown to us. nothing should
- # happpen then.
- bob.get_process_exit_status_called = False
- bob._get_process_exit_status = bob._get_process_exit_status_unknown_pid
- bob.components_to_restart = []
- # this should do nothing as the pid is unknown
- bob.reap_children()
- self.assertEqual([], bob.components_to_restart)
-
- # case where bob._get_process_exit_status() raises OSError with
- # errno.ECHILD
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_oserror_echild
- bob.components_to_restart = []
- # this should catch and handle the OSError
- bob.reap_children()
- self.assertEqual([], bob.components_to_restart)
-
- # case where bob._get_process_exit_status() raises OSError with
- # errno other than ECHILD
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_oserror_other
- with self.assertRaises(OSError):
- bob.reap_children()
-
- # case where bob._get_process_exit_status() raises something
- # other than OSError
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_other
- with self.assertRaises(Exception):
- bob.reap_children()
-
- def test_kill_started_components(self):
- '''Test that started components are killed.'''
- bob = MockBob()
-
- component = MockComponent('test', 53, 'Test')
- bob.components[53] = component
-
- self.assertEqual([[53, 'test', 'Test']], bob.get_processes())
- bob.kill_started_components()
- self.assertEqual([], bob.get_processes())
- self.assertTrue(component.forceful)
-
- def _start_msgq_helper(self, bob, verbose):
- bob.verbose = verbose
- pi = bob.start_msgq()
- self.assertEqual('b10-msgq', pi.name)
- self.assertEqual(['b10-msgq'], pi.args)
- self.assertTrue(pi.dev_null_stdout)
- self.assertEqual(pi.dev_null_stderr, not verbose)
- self.assertEqual({'FOO': 'an env string'}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_start_msgq(self):
- '''Test that b10-msgq is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'FOO': 'an env string'}
- bob._run_under_unittests = True
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- # non-verbose case
- self._start_msgq_helper(bob, False)
-
- # verbose case
- self._start_msgq_helper(bob, True)
-
- def test_start_msgq_timeout(self):
- '''Test that b10-msgq startup attempts connections several times
- and times out eventually.'''
- bob = MockBobSimple()
- bob.c_channel_env = {}
- # set the timeout to an arbitrary pre-determined value (which
- # code below depends on)
- bob.msgq_timeout = 1
- bob._run_under_unittests = False
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- global tsec
- attempts = 0
- tsec = 0
- self._tmp_time = time.time
- self._tmp_sleep = time.sleep
- def _my_time():
- global attempts
- global tsec
- attempts += 1
- return tsec
- def _my_sleep(nsec):
- global tsec
- tsec += nsec
- time.time = _my_time
- time.sleep = _my_sleep
-
- global cc_sub
- cc_sub = None
- class DummySessionAlwaysFails():
- def __init__(self, socket_file):
- raise isc.cc.session.SessionError('Connection fails')
- def group_subscribe(self, s):
- global cc_sub
- cc_sub = s
-
- isc.cc.Session = DummySessionAlwaysFails
-
- with self.assertRaises(bind10_src.CChannelConnectError):
- # An exception will be thrown here when it eventually times
- # out.
- pi = bob.start_msgq()
-
- # time.time() should be called 12 times within the while loop:
- # starting from 0, and 11 more times from 0.1 to 1.1. There's
- # another call to time.time() outside the loop, which makes it
- # 13.
- self.assertEqual(attempts, 13)
-
- # group_subscribe() should not have been called here.
- self.assertIsNone(cc_sub)
-
- global cc_socket_file
- cc_socket_file = None
- cc_sub = None
- class DummySession():
- def __init__(self, socket_file):
- global cc_socket_file
- cc_socket_file = socket_file
- def group_subscribe(self, s):
- global cc_sub
- cc_sub = s
-
- isc.cc.Session = DummySession
-
- # reset values
- attempts = 0
- tsec = 0
-
- pi = bob.start_msgq()
-
- # just one attempt, but 2 calls to time.time()
- self.assertEqual(attempts, 2)
-
- self.assertEqual(cc_socket_file, bob.msgq_socket_file)
- self.assertEqual(cc_sub, 'Boss')
-
- # isc.cc.Session, time.time() and time.sleep() are restored
- # during tearDown().
-
- def _start_cfgmgr_helper(self, bob, data_path, filename, clear_config):
- expect_args = ['b10-cfgmgr']
- if data_path is not None:
- bob.data_path = data_path
- expect_args.append('--data-path=' + data_path)
- if filename is not None:
- bob.config_filename = filename
- expect_args.append('--config-filename=' + filename)
- if clear_config:
- bob.clear_config = clear_config
- expect_args.append('--clear-config')
-
- pi = bob.start_cfgmgr()
- self.assertEqual('b10-cfgmgr', pi.name)
- self.assertEqual(expect_args, pi.args)
- self.assertEqual({'TESTENV': 'A test string'}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_start_cfgmgr(self):
- '''Test that b10-cfgmgr is started.'''
- class DummySession():
- def __init__(self):
- self._tries = 0
- def group_recvmsg(self):
- self._tries += 1
- # return running on the 3rd try onwards
- if self._tries >= 3:
- return ({'running': 'ConfigManager'}, None)
- else:
- return ({}, None)
-
- bob = MockBobSimple()
- bob.c_channel_env = {'TESTENV': 'A test string'}
- bob.cc_session = DummySession()
- bob.wait_time = 5
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- attempts = 0
- self._tmp_sleep = time.sleep
- def _my_sleep(nsec):
- global attempts
- attempts += 1
- time.sleep = _my_sleep
-
- # defaults
- self._start_cfgmgr_helper(bob, None, None, False)
-
- # check that 2 attempts were made. on the 3rd attempt,
- # process_running() returns that ConfigManager is running.
- self.assertEqual(attempts, 2)
-
- # data_path is specified
- self._start_cfgmgr_helper(bob, '/var/lib/test', None, False)
-
- # config_filename is specified. Because `bob` is not
- # reconstructed, data_path is retained from the last call to
- # _start_cfgmgr_helper().
- self._start_cfgmgr_helper(bob, '/var/lib/test', 'foo.cfg', False)
-
- # clear_config is specified. Because `bob` is not reconstructed,
- # data_path and config_filename are retained from the last call
- # to _start_cfgmgr_helper().
- self._start_cfgmgr_helper(bob, '/var/lib/test', 'foo.cfg', True)
-
- def test_start_cfgmgr_timeout(self):
- '''Test that b10-cfgmgr startup attempts connections several times
- and times out eventually.'''
- class DummySession():
- def group_recvmsg(self):
- return (None, None)
- bob = MockBobSimple()
- bob.c_channel_env = {}
- bob.cc_session = DummySession()
- # set wait_time to an arbitrary pre-determined value (which code
- # below depends on)
- bob.wait_time = 2
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- attempts = 0
- self._tmp_sleep = time.sleep
- def _my_sleep(nsec):
- global attempts
- attempts += 1
- time.sleep = _my_sleep
-
- # We just check that an exception was thrown, and that several
- # attempts were made to connect.
- with self.assertRaises(bind10_src.ProcessStartError):
- pi = bob.start_cfgmgr()
-
- # 2 seconds of attempts every 1 second should result in 2 attempts
- self.assertEqual(attempts, 2)
-
- # time.sleep() is restored during tearDown().
-
- def test_start_ccsession(self):
- '''Test that CC session is started.'''
- class DummySession():
- def __init__(self, specfile, config_handler, command_handler,
- socket_file):
- self.specfile = specfile
- self.config_handler = config_handler
- self.command_handler = command_handler
- self.socket_file = socket_file
- self.started = False
- def start(self):
- self.started = True
- bob = MockBobSimple()
- self._tmp_module_cc_session = isc.config.ModuleCCSession
- isc.config.ModuleCCSession = DummySession
-
- bob.start_ccsession({})
- self.assertEqual(bind10_src.SPECFILE_LOCATION, bob.ccs.specfile)
- self.assertEqual(bob.config_handler, bob.ccs.config_handler)
- self.assertEqual(bob.command_handler, bob.ccs.command_handler)
- self.assertEqual(bob.msgq_socket_file, bob.ccs.socket_file)
- self.assertTrue(bob.ccs.started)
-
- # isc.config.ModuleCCSession is restored during tearDown().
-
- def test_start_process(self):
- '''Test that processes can be started.'''
- bob = MockBob()
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- pi = bob.start_process('Test Process', ['/bin/true'], {})
- self.assertEqual('Test Process', pi.name)
- self.assertEqual(['/bin/true'], pi.args)
- self.assertEqual({}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_register_process(self):
- '''Test that processes can be registered with BoB.'''
- bob = MockBob()
- component = MockComponent('test', 53, 'Test')
-
- self.assertFalse(53 in bob.components)
- bob.register_process(53, component)
- self.assertTrue(53 in bob.components)
- self.assertEqual(bob.components[53].name(), 'test')
- self.assertEqual(bob.components[53].pid(), 53)
- self.assertEqual(bob.components[53].address(), 'Test')
-
- def _start_simple_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['/bin/true']
- if verbose:
- args.append('-v')
-
- bob.start_simple('/bin/true')
- self.assertEqual('/bin/true', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'TESTENV': 'A test string'}, bob.started_process_env)
-
- def test_start_simple(self):
- '''Test simple process startup.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'TESTENV': 'A test string'}
-
- # non-verbose case
- self._start_simple_helper(bob, False)
-
- # verbose case
- self._start_simple_helper(bob, True)
-
- def _start_auth_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['b10-auth']
- if verbose:
- args.append('-v')
-
- bob.start_auth()
- self.assertEqual('b10-auth', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'FOO': 'an env string'}, bob.started_process_env)
-
- def test_start_auth(self):
- '''Test that b10-auth is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'FOO': 'an env string'}
-
- # non-verbose case
- self._start_auth_helper(bob, False)
-
- # verbose case
- self._start_auth_helper(bob, True)
-
- def _start_resolver_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['b10-resolver']
- if verbose:
- args.append('-v')
-
- bob.start_resolver()
- self.assertEqual('b10-resolver', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'BAR': 'an env string'}, bob.started_process_env)
-
- def test_start_resolver(self):
- '''Test that b10-resolver is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'BAR': 'an env string'}
-
- # non-verbose case
- self._start_resolver_helper(bob, False)
-
- # verbose case
- self._start_resolver_helper(bob, True)
-
- def _start_cmdctl_helper(self, bob, verbose, port = None):
- bob.verbose = verbose
-
- args = ['b10-cmdctl']
-
- if port is not None:
- bob.cmdctl_port = port
- args.append('--port=9353')
-
- if verbose:
- args.append('-v')
-
- bob.start_cmdctl()
- self.assertEqual('b10-cmdctl', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'BAZ': 'an env string'}, bob.started_process_env)
-
- def test_start_cmdctl(self):
- '''Test that b10-cmdctl is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'BAZ': 'an env string'}
-
- # non-verbose case
- self._start_cmdctl_helper(bob, False)
-
- # verbose case
- self._start_cmdctl_helper(bob, True)
-
- # with port, non-verbose case
- self._start_cmdctl_helper(bob, False, 9353)
-
- # with port, verbose case
- self._start_cmdctl_helper(bob, True, 9353)
-
- def test_socket_data(self):
- '''Test that BoB._socket_data works as expected.'''
- class MockSock:
- def __init__(self, fd, throw):
- self.fd = fd
- self.throw = throw
- self.buf = b'Hello World.\nYou are so nice today.\nXX'
- self.i = 0
-
- def recv(self, bufsize, flags = 0):
- if bufsize != 1:
- raise Exception('bufsize != 1')
- if flags != socket.MSG_DONTWAIT:
- raise Exception('flags != socket.MSG_DONTWAIT')
- # after 15 recv()s, throw a socket.error with EAGAIN to
- # get _socket_data() to save back what's been read. The
- # number 15 is arbitrarily chosen, but the checks then
- # depend on this being 15, i.e., if you adjust this
- # number, you may have to adjust the checks below too.
- if self.throw and self.i > 15:
- raise socket.error(errno.EAGAIN, 'Try again')
- if self.i >= len(self.buf):
- return b'';
- t = self.i
- self.i += 1
- return self.buf[t:t+1]
-
- def close(self):
- return
-
- class MockBobSocketData(BoB):
- def __init__(self, throw):
- self._unix_sockets = {42: (MockSock(42, throw), b'')}
- self.requests = []
- self.dead = []
-
- def socket_request_handler(self, previous, sock):
- self.requests.append({sock.fd: previous})
-
- def socket_consumer_dead(self, sock):
- self.dead.append(sock.fd)
-
- # Case where we get data every time we call recv()
- bob = MockBobSocketData(False)
- bob._socket_data(42)
- self.assertEqual(bob.requests,
- [{42: b'Hello World.'},
- {42: b'You are so nice today.'}])
- self.assertEqual(bob.dead, [42])
- self.assertEqual({}, bob._unix_sockets)
-
- # Case where socket.recv() raises EAGAIN. In this case, the
- # routine is supposed to save what it has back to
- # BoB._unix_sockets.
- bob = MockBobSocketData(True)
- bob._socket_data(42)
- self.assertEqual(bob.requests, [{42: b'Hello World.'}])
- self.assertFalse(bob.dead)
- self.assertEqual(len(bob._unix_sockets), 1)
- self.assertEqual(bob._unix_sockets[42][1], b'You')
-
- def test_startup(self):
- '''Test that BoB.startup() handles failures properly.'''
- class MockBobStartup(BoB):
- def __init__(self, throw):
- self.throw = throw
- self.started = False
- self.killed = False
- self.msgq_socket_file = None
- self.curproc = 'myproc'
- self.runnable = False
-
- def start_all_components(self):
- self.started = True
- if self.throw is True:
- raise Exception('Assume starting components has failed.')
- elif self.throw:
- raise self.throw
-
- def kill_started_components(self):
- self.killed = True
-
- class DummySession():
- def __init__(self, socket_file):
- raise isc.cc.session.SessionError('This is the expected case.')
-
- class DummySessionSocketExists():
- def __init__(self, socket_file):
- # simulate that connect passes
- return
-
- isc.cc.Session = DummySession
-
- # All is well case, where all components are started
- # successfully. We check that the actual call to
- # start_all_components() is made, and BoB.runnable is true.
- bob = MockBobStartup(False)
- r = bob.startup()
- self.assertIsNone(r)
- self.assertTrue(bob.started)
- self.assertFalse(bob.killed)
- self.assertTrue(bob.runnable)
- self.assertEqual({}, bob.c_channel_env)
-
- # Case where starting components fails. We check that
- # kill_started_components() is called right after, and
- # BoB.runnable is not modified.
- bob = MockBobStartup(True)
- r = bob.startup()
- # r contains an error message
- self.assertEqual(r, 'Unable to start myproc: Assume starting components has failed.')
- self.assertTrue(bob.started)
- self.assertTrue(bob.killed)
- self.assertFalse(bob.runnable)
- self.assertEqual({}, bob.c_channel_env)
-
- # Check if msgq_socket_file is carried over
- bob = MockBobStartup(False)
- bob.msgq_socket_file = 'foo'
- r = bob.startup()
- self.assertEqual({'BIND10_MSGQ_SOCKET_FILE': 'foo'}, bob.c_channel_env)
-
- # Check failure of changing user results in a different message
- bob = MockBobStartup(bind10_src.ChangeUserError('failed to chusr'))
- r = bob.startup()
- self.assertIn('failed to chusr', r)
- self.assertTrue(bob.killed)
-
- # Check the case when socket file already exists
- isc.cc.Session = DummySessionSocketExists
- bob = MockBobStartup(False)
- r = bob.startup()
- self.assertIn('already running', r)
-
- # isc.cc.Session is restored during tearDown().
-
-class SocketSrvTest(unittest.TestCase):
- """
- This tests some methods of boss related to the unix domain sockets used
- to transfer other sockets to applications.
- """
- def setUp(self):
- """
- Create the boss to test, testdata and backup some functions.
- """
- self.__boss = BoB()
- self.__select_backup = bind10_src.select.select
- self.__select_called = None
- self.__socket_data_called = None
- self.__consumer_dead_called = None
- self.__socket_request_handler_called = None
-
- def tearDown(self):
- """
- Restore functions.
- """
- bind10_src.select.select = self.__select_backup
-
- class __FalseSocket:
- """
- A mock socket for the select and accept and stuff like that.
- """
- def __init__(self, owner, fileno=42):
- self.__owner = owner
- self.__fileno = fileno
- self.data = None
- self.closed = False
-
- def fileno(self):
- return self.__fileno
-
- def accept(self):
- return (self.__class__(self.__owner, 13), "/path/to/socket")
-
- def recv(self, bufsize, flags=0):
- self.__owner.assertEqual(1, bufsize)
- self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
- if isinstance(self.data, socket.error):
- raise self.data
- elif self.data is not None:
- if len(self.data):
- result = self.data[0:1]
- self.data = self.data[1:]
- return result
- else:
- raise socket.error(errno.EAGAIN, "Would block")
- else:
- return b''
-
- def close(self):
- self.closed = True
-
- class __CCS:
- """
- A mock CCS, just to provide the socket file number.
- """
- class __Socket:
- def fileno(self):
- return 1
- def get_socket(self):
- return self.__Socket()
-
- def __select_accept(self, r, w, x, t):
- self.__select_called = (r, w, x, t)
- return ([42], [], [])
-
- def __select_data(self, r, w, x, t):
- self.__select_called = (r, w, x, t)
- return ([13], [], [])
-
- def __accept(self):
- """
- Hijact the accept method of the boss.
-
- Notes down it was called and stops the boss.
- """
- self.__accept_called = True
- self.__boss.runnable = False
-
- def test_srv_accept_called(self):
- """
- Test that the _srv_accept method of boss is called when the listening
- socket is readable.
- """
- self.__boss.runnable = True
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._srv_accept = self.__accept
- self.__boss.ccs = self.__CCS()
- bind10_src.select.select = self.__select_accept
- self.__boss.run(2)
- # It called the accept
- self.assertTrue(self.__accept_called)
- # And the select had the right parameters
- self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
-
- def test_srv_accept(self):
- """
- Test how the _srv_accept method works.
- """
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._srv_accept()
- # After we accepted, a new socket is added there
- socket = self.__boss._unix_sockets[13][0]
- # The socket is properly stored there
- self.assertTrue(isinstance(socket, self.__FalseSocket))
- # And the buffer (yet empty) is there
- self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
-
- def __socket_data(self, socket):
- self.__boss.runnable = False
- self.__socket_data_called = socket
-
- def test_socket_data(self):
- """
- Test that a socket that wants attention gets it.
- """
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._socket_data = self.__socket_data
- self.__boss.ccs = self.__CCS()
- self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
- self.__boss.runnable = True
- bind10_src.select.select = self.__select_data
- self.__boss.run(2)
- self.assertEqual(13, self.__socket_data_called)
- self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
-
- def __prepare_data(self, data):
- socket = self.__FalseSocket(self, 13)
- self.__boss._unix_sockets = {13: (socket, b'')}
- socket.data = data
- self.__boss.socket_consumer_dead = self.__consumer_dead
- self.__boss.socket_request_handler = self.__socket_request_handler
- return socket
-
- def __consumer_dead(self, socket):
- self.__consumer_dead_called = socket
-
- def __socket_request_handler(self, token, socket):
- self.__socket_request_handler_called = (token, socket)
-
- def test_socket_closed(self):
- """
- Test that a socket is removed and the socket_consumer_dead is called
- when it is closed.
- """
- socket = self.__prepare_data(None)
- self.__boss._socket_data(13)
- self.assertEqual(socket, self.__consumer_dead_called)
- self.assertEqual({}, self.__boss._unix_sockets)
- self.assertTrue(socket.closed)
-
- def test_socket_short(self):
- """
- Test that if there's not enough data to get the whole socket, it is
- kept there, but nothing is called.
- """
- socket = self.__prepare_data(b'tok')
- self.__boss._socket_data(13)
- self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
- self.assertFalse(socket.closed)
- self.assertIsNone(self.__consumer_dead_called)
- self.assertIsNone(self.__socket_request_handler_called)
-
- def test_socket_continue(self):
- """
- Test that we call the token handling function when the whole token
- comes. This test pretends to continue reading where the previous one
- stopped.
- """
- socket = self.__prepare_data(b"en\nanothe")
- # The data to finish
- self.__boss._unix_sockets[13] = (socket, b'tok')
- self.__boss._socket_data(13)
- self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
- self.assertFalse(socket.closed)
- self.assertIsNone(self.__consumer_dead_called)
- self.assertEqual((b'token', socket),
- self.__socket_request_handler_called)
-
- def test_broken_socket(self):
- """
- If the socket raises an exception during the read other than EAGAIN,
- it is broken and we remove it.
- """
- sock = self.__prepare_data(socket.error(errno.ENOMEM,
- "There's more memory available, but not for you"))
- self.__boss._socket_data(13)
- self.assertEqual(sock, self.__consumer_dead_called)
- self.assertEqual({}, self.__boss._unix_sockets)
- self.assertTrue(sock.closed)
-
-class TestFunctions(unittest.TestCase):
- def setUp(self):
- self.lockfile_testpath = \
- "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test"
- self.assertFalse(os.path.exists(self.lockfile_testpath))
- os.mkdir(self.lockfile_testpath)
- self.assertTrue(os.path.isdir(self.lockfile_testpath))
- self.__isfile_orig = bind10_src.os.path.isfile
- self.__unlink_orig = bind10_src.os.unlink
-
- def tearDown(self):
- os.rmdir(self.lockfile_testpath)
- self.assertFalse(os.path.isdir(self.lockfile_testpath))
- os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@"
- bind10_src.os.path.isfile = self.__isfile_orig
- bind10_src.os.unlink = self.__unlink_orig
-
- def test_remove_lock_files(self):
- os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath
-
- # create lockfiles for the testcase
- lockfiles = ["logger_lockfile"]
- for f in lockfiles:
- fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
- self.assertFalse(os.path.exists(fname))
- open(fname, "w").close()
- self.assertTrue(os.path.isfile(fname))
-
- # first call should clear up all the lockfiles
- bind10_src.remove_lock_files()
-
- # check if the lockfiles exist
- for f in lockfiles:
- fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
- self.assertFalse(os.path.isfile(fname))
-
- # second call should not assert anyway
- bind10_src.remove_lock_files()
-
- def test_remove_lock_files_fail(self):
- # Permission error on unlink is ignored; other exceptions are really
- # unexpected and propagated.
- def __raising_unlink(unused, ex):
- raise ex
-
- bind10_src.os.path.isfile = lambda _: True
- os_error = OSError()
- bind10_src.os.unlink = lambda f: __raising_unlink(f, os_error)
-
- os_error.errno = errno.EPERM
- bind10_src.remove_lock_files() # no disruption
-
- os_error.errno = errno.EACCES
- bind10_src.remove_lock_files() # no disruption
-
- os_error.errno = errno.ENOENT
- self.assertRaises(OSError, bind10_src.remove_lock_files)
-
- bind10_src.os.unlink = lambda f: __raising_unlink(f, Exception('bad'))
- self.assertRaises(Exception, bind10_src.remove_lock_files)
-
- def test_get_signame(self):
- # just test with some samples
- signame = bind10_src.get_signame(signal.SIGTERM)
- self.assertEqual('SIGTERM', signame)
- signame = bind10_src.get_signame(signal.SIGKILL)
- self.assertEqual('SIGKILL', signame)
- # 59426 is hopefully an unused signal on most platforms
- signame = bind10_src.get_signame(59426)
- self.assertEqual('Unknown signal 59426', signame)
-
- def test_fatal_signal(self):
- self.assertIsNone(bind10_src.boss_of_bind)
- bind10_src.boss_of_bind = BoB()
- bind10_src.boss_of_bind.runnable = True
- bind10_src.fatal_signal(signal.SIGTERM, None)
- # Now, runnable must be False
- self.assertFalse(bind10_src.boss_of_bind.runnable)
- bind10_src.boss_of_bind = None
-
-if __name__ == '__main__':
- # store os.environ for test_unchanged_environment
- original_os_environ = copy.deepcopy(os.environ)
- isc.log.resetUnitTestRootLogger()
- unittest.main()
diff --git a/src/bin/bind10/tests/init_test.py.in b/src/bin/bind10/tests/init_test.py.in
new file mode 100644
index 0000000..9a591ef
--- /dev/null
+++ b/src/bin/bind10/tests/init_test.py.in
@@ -0,0 +1,2426 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Most of the time, we omit the "init" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the b10-init.
+from init import Init, ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import init
+
+# XXX: environment tests are currently disabled, due to the preprocessor
+# setup that we have now complicating the environment
+
+import unittest
+import sys
+import os
+import os.path
+import copy
+import signal
+import socket
+from isc.net.addr import IPAddr
+import time
+import isc.log
+import isc.config
+import isc.bind10.socket_cache
+import errno
+import random
+
+from isc.testutils.parse_args import TestOptParser, OptsError
+from isc.testutils.ccsession_mock import MockModuleCCSession
+
+class TestProcessInfo(unittest.TestCase):
+ def setUp(self):
+ # redirect stdout to a pipe so we can check that our
+ # process spawning is doing the right thing with stdout
+ self.old_stdout = os.dup(sys.stdout.fileno())
+ self.pipes = os.pipe()
+ os.dup2(self.pipes[1], sys.stdout.fileno())
+ os.close(self.pipes[1])
+ # note that we use dup2() to restore the original stdout
+ # to the main program ASAP in each test... this prevents
+ # hangs reading from the child process (as the pipe is only
+ # open in the child), and also insures nice pretty output
+
+ def tearDown(self):
+ # clean up our stdout munging
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ os.close(self.pipes[0])
+
+ def test_init(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ pi.spawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+
+# def test_setting_env(self):
+# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
+# os.dup2(self.old_stdout, sys.stdout.fileno())
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
+# 'FOO': 'BAR' })
+
+ def test_setting_null_stdout(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
+ dev_null_stdout=True)
+ pi.spawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.dev_null_stdout, True)
+ self.assertEqual(os.read(self.pipes[0], 100), b"")
+
+ def test_respawn(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ pi.spawn()
+ # wait for old process to work...
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ # respawn it
+ old_pid = pi.pid
+ pi.respawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ # make sure the new one started properly
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+ self.assertNotEqual(pi.pid, old_pid)
+
+class TestCacheCommands(unittest.TestCase):
+ """
+ Test methods of b10-init related to the socket cache and socket handling.
+ """
+ def setUp(self):
+ """
+ Prepare b10-init for some tests.
+
+ Also prepare some variables we need.
+ """
+ self.__b10_init = Init()
+ # Fake the cache here so we can pretend it is us and hijack the
+ # calls to its methods.
+ self.__b10_init._socket_cache = self
+ self.__b10_init._socket_path = '/socket/path'
+ self.__raise_exception = None
+ self.__socket_args = {
+ "port": 53,
+ "address": "::",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # What was and wasn't called.
+ self.__drop_app_called = None
+ self.__get_socket_called = None
+ self.__send_fd_called = None
+ self.__get_token_called = None
+ self.__drop_socket_called = None
+ init.libutil_io_python.send_fd = self.__send_fd
+
+ def __send_fd(self, to, socket):
+ """
+ A function to hook the send_fd in the b10-init.
+ """
+ self.__send_fd_called = (to, socket)
+
+ class FalseSocket:
+ """
+ A socket where we can fake methods we need instead of having a real
+ socket.
+ """
+ def __init__(self):
+ self.send = b""
+ def fileno(self):
+ """
+ The file number. Used for identifying the remote application.
+ """
+ return 42
+
+ def sendall(self, data):
+ """
+ Adds data to the self.send.
+ """
+ self.send += data
+
+ def drop_application(self, application):
+ """
+ Part of pretending to be the cache. Logs the parameter to
+ self.__drop_app_called.
+
+ In the case self.__raise_exception is set, the exception there
+ is raised instead.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_app_called = application
+
+ def test_consumer_dead(self):
+ """
+ Test that it calls the drop_application method of the cache.
+ """
+ self.__b10_init.socket_consumer_dead(self.FalseSocket())
+ self.assertEqual(42, self.__drop_app_called)
+
+ def test_consumer_dead_invalid(self):
+ """
+ Test that it doesn't crash in case the application is not known to
+ the cache, the b10_init doesn't crash, as this actually can happen in
+ practice.
+ """
+ self.__raise_exception = ValueError("This application is unknown")
+ # This doesn't crash
+ self.__b10_init.socket_consumer_dead(self.FalseSocket())
+
+ def get_socket(self, token, application):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the call is logged
+ into __get_socket_called and a number is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_socket_called = (token, application)
+ return 13
+
+ def test_request_handler(self):
+ """
+ Test that a request for socket is forwarded and the socket is sent
+ back, if it returns a socket.
+ """
+ socket = self.FalseSocket()
+ # An exception from the cache
+ self.__raise_exception = ValueError("Test value error")
+ self.__b10_init.socket_request_handler(b"token", socket)
+ # It was called, but it threw, so it is not noted here
+ self.assertIsNone(self.__get_socket_called)
+ self.assertEqual(b"0\n", socket.send)
+ # It should not have sent any socket.
+ self.assertIsNone(self.__send_fd_called)
+ # Now prepare a valid scenario
+ self.__raise_exception = None
+ socket.send = b""
+ self.__b10_init.socket_request_handler(b"token", socket)
+ self.assertEqual(b"1\n", socket.send)
+ self.assertEqual((42, 13), self.__send_fd_called)
+ self.assertEqual(("token", 42), self.__get_socket_called)
+
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameters are
+ logged into __get_token_called and a token is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_token_called = (protocol, address, port, share_mode,
+ share_name)
+ return "token"
+
+ def test_get_socket_ok(self):
+ """
+ Test the successful scenario of getting a socket.
+ """
+ result = self.__b10_init._get_socket(self.__socket_args)
+ [code, answer] = result['result']
+ self.assertEqual(0, code)
+ self.assertEqual({
+ 'token': 'token',
+ 'path': '/socket/path'
+ }, answer)
+ addr = self.__get_token_called[1]
+ self.assertTrue(isinstance(addr, IPAddr))
+ self.assertEqual("::", str(addr))
+ self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+ self.__get_token_called)
+
+ def test_get_socket_error(self):
+ """
+ Test that bad inputs are handled correctly, etc.
+ """
+ def check_code(code, args):
+ """
+ Pass the args there and check if it returns success or not.
+
+ The rest is not tested, as it is already checked in the
+ test_get_socket_ok.
+ """
+ [rcode, ranswer] = self.__b10_init._get_socket(args)['result']
+ self.assertEqual(code, rcode)
+ if code != 0:
+ # This should be an error message. The exact formatting
+ # is unknown, but we check it is string at least
+ self.assertTrue(isinstance(ranswer, str))
+
+ def mod_args(name, value):
+ """
+ Override a parameter in the args.
+ """
+ result = dict(self.__socket_args)
+ result[name] = value
+ return result
+
+ # Port too large
+ check_code(1, mod_args('port', 65536))
+ # Not numeric address
+ check_code(1, mod_args('address', 'example.org.'))
+ # Some bad values of enum-like params
+ check_code(1, mod_args('protocol', 'BAD PROTO'))
+ check_code(1, mod_args('share_mode', 'BAD SHARE'))
+ # Check missing parameters
+ for param in self.__socket_args.keys():
+ args = dict(self.__socket_args)
+ del args[param]
+ check_code(1, args)
+ # These are OK values for the enum-like parameters
+ # The ones from test_get_socket_ok are not tested here
+ check_code(0, mod_args('protocol', 'TCP'))
+ check_code(0, mod_args('share_mode', 'SAMEAPP'))
+ check_code(0, mod_args('share_mode', 'NO'))
+ # If an exception is raised from within the cache, it is converted
+ # to an error, not propagated
+ self.__raise_exception = Exception("Test exception")
+ check_code(1, self.__socket_args)
+ # The special "expected" exceptions
+ self.__raise_exception = \
+ isc.bind10.socket_cache.ShareError("Not shared")
+ check_code(3, self.__socket_args)
+ self.__raise_exception = \
+ isc.bind10.socket_cache.SocketError("Not shared", 13)
+ check_code(2, self.__socket_args)
+
+ def drop_socket(self, token):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameter is stored
+ in __drop_socket_called.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_socket_called = token
+
+ def test_drop_socket(self):
+ """
+ Check the drop_socket command. It should directly call the method
+ on the cache. Exceptions should be translated to error messages.
+ """
+ # This should be OK and just propagated to the call.
+ self.assertEqual({"result": [0]},
+ self.__b10_init.command_handler("drop_socket",
+ {"token": "token"}))
+ self.assertEqual("token", self.__drop_socket_called)
+ self.__drop_socket_called = None
+ # Missing parameter
+ self.assertEqual({"result": [1, "Missing token parameter"]},
+ self.__b10_init.command_handler("drop_socket", {}))
+ self.assertIsNone(self.__drop_socket_called)
+ # An exception is raised from within the cache
+ self.__raise_exception = ValueError("Test error")
+ self.assertEqual({"result": [1, "Test error"]},
+ self.__b10_init.command_handler("drop_socket",
+ {"token": "token"}))
+
+
+class TestInit(unittest.TestCase):
+ def setUp(self):
+ # Save original values that may be tweaked in some tests
+ self.__orig_setgid = init.posix.setgid
+ self.__orig_setuid = init.posix.setuid
+ self.__orig_logger_class = isc.log.Logger
+
+ def tearDown(self):
+ # Restore original values saved in setUp()
+ init.posix.setgid = self.__orig_setgid
+ init.posix.setuid = self.__orig_setuid
+ isc.log.Logger = self.__orig_logger_class
+
+ def test_init(self):
+ b10_init = Init()
+ self.assertEqual(b10_init.verbose, False)
+ self.assertEqual(b10_init.msgq_socket_file, None)
+ self.assertEqual(b10_init.cc_session, None)
+ self.assertEqual(b10_init.ccs, None)
+ self.assertEqual(b10_init.components, {})
+ self.assertEqual(b10_init.runnable, False)
+ self.assertEqual(b10_init.username, None)
+ self.assertIsNone(b10_init._socket_cache)
+
+ def __setgid(self, gid):
+ self.__gid_set = gid
+
+ def __setuid(self, uid):
+ self.__uid_set = uid
+
+ def test_change_user(self):
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = self.__setuid
+
+ self.__gid_set = None
+ self.__uid_set = None
+ b10_init = Init()
+ b10_init.change_user()
+ # No gid/uid set in init, nothing called.
+ self.assertIsNone(self.__gid_set)
+ self.assertIsNone(self.__uid_set)
+
+ Init(setuid=42, setgid=4200).change_user()
+ # This time, it get's called
+ self.assertEqual(4200, self.__gid_set)
+ self.assertEqual(42, self.__uid_set)
+
+ def raising_set_xid(gid_or_uid):
+ ex = OSError()
+ ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
+ raise ex
+
+ # Let setgid raise an exception
+ init.posix.setgid = raising_set_xid
+ init.posix.setuid = self.__setuid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
+ # Let setuid raise an exception
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = raising_set_xid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
+ # Let initial log output after setuid raise an exception
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = self.__setuid
+ isc.log.Logger = raising_set_xid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
+ def test_set_creator(self):
+ """
+ Test the call to set_creator. First time, the cache is created
+ with the passed creator. The next time, it throws an exception.
+ """
+ init = Init()
+ # The cache doesn't use it at start, so just create an empty class
+ class Creator: pass
+ creator = Creator()
+ init.set_creator(creator)
+ self.assertTrue(isinstance(init._socket_cache,
+ isc.bind10.socket_cache.Cache))
+ self.assertEqual(creator, init._socket_cache._creator)
+ self.assertRaises(ValueError, init.set_creator, creator)
+
+ def test_socket_srv(self):
+ """Tests init_socket_srv() and remove_socket_srv() work as expected."""
+ init = Init()
+
+ self.assertIsNone(init._srv_socket)
+ self.assertIsNone(init._tmpdir)
+ self.assertIsNone(init._socket_path)
+
+ init.init_socket_srv()
+
+ self.assertIsNotNone(init._srv_socket)
+ self.assertNotEqual(-1, init._srv_socket.fileno())
+ self.assertEqual(os.path.join(init._tmpdir, 'sockcreator'),
+ init._srv_socket.getsockname())
+
+ self.assertIsNotNone(init._tmpdir)
+ self.assertTrue(os.path.isdir(init._tmpdir))
+ self.assertIsNotNone(init._socket_path)
+ self.assertTrue(os.path.exists(init._socket_path))
+
+ # Check that it's possible to connect to the socket file (this
+ # only works if the socket file exists and the server listens on
+ # it).
+ s = socket.socket(socket.AF_UNIX)
+ try:
+ s.connect(init._socket_path)
+ can_connect = True
+ s.close()
+ except socket.error as e:
+ can_connect = False
+
+ self.assertTrue(can_connect)
+
+ init.remove_socket_srv()
+
+ self.assertEqual(-1, init._srv_socket.fileno())
+ self.assertFalse(os.path.exists(init._socket_path))
+ self.assertFalse(os.path.isdir(init._tmpdir))
+
+ # These should not fail either:
+
+ # second call
+ init.remove_socket_srv()
+
+ init._srv_socket = None
+ init.remove_socket_srv()
+
+ def test_init_alternate_socket(self):
+ init = Init("alt_socket_file")
+ self.assertEqual(init.verbose, False)
+ self.assertEqual(init.msgq_socket_file, "alt_socket_file")
+ self.assertEqual(init.cc_session, None)
+ self.assertEqual(init.ccs, None)
+ self.assertEqual(init.components, {})
+ self.assertEqual(init.runnable, False)
+ self.assertEqual(init.username, None)
+
+ def test_command_handler(self):
+ class DummySession():
+ def group_sendmsg(self, msg, group):
+ (self.msg, self.group) = (msg, group)
+ def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Init",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
+ init = Init()
+ init.verbose = True
+ init.cc_session = DummySession()
+ init.ccs = DummyModuleCCSession()
+ # a bad command
+ self.assertEqual(init.command_handler(-1, None),
+ isc.config.ccsession.create_answer(1, "bad command"))
+ # "shutdown" command
+ self.assertEqual(init.command_handler("shutdown", None),
+ isc.config.ccsession.create_answer(0))
+ self.assertFalse(init.runnable)
+ # "getstats" command
+ self.assertEqual(init.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME) }))
+ # "ping" command
+ self.assertEqual(init.command_handler("ping", None),
+ isc.config.ccsession.create_answer(0, "pong"))
+ # "show_processes" command
+ self.assertEqual(init.command_handler("show_processes", None),
+ isc.config.ccsession.create_answer(0,
+ init.get_processes()))
+ # an unknown command
+ self.assertEqual(init.command_handler("__UNKNOWN__", None),
+ isc.config.ccsession.create_answer(1, "Unknown command"))
+
+ # Fake the get_token of cache and test the command works
+ init._socket_path = '/socket/path'
+ class cache:
+ def get_token(self, protocol, addr, port, share_mode, share_name):
+ return str(addr) + ':' + str(port)
+ init._socket_cache = cache()
+ args = {
+ "port": 53,
+ "address": "0.0.0.0",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # at all and this is the easiest way to check.
+ self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+ 'path': '/socket/path'}]},
+ init.command_handler("get_socket", args))
+ # The drop_socket is not tested here, but in TestCacheCommands.
+ # It needs the cache mocks to be in place and they are there.
+
+ def test_stop_process(self):
+ """
+ Test checking the stop_process method sends the right message over
+ the message bus.
+ """
+ class DummySession():
+ def group_sendmsg(self, msg, group, instance="*"):
+ (self.msg, self.group, self.instance) = (msg, group, instance)
+ init = Init()
+ init.cc_session = DummySession()
+ init.stop_process('process', 'address', 42)
+ self.assertEqual('address', init.cc_session.group)
+ self.assertEqual('address', init.cc_session.instance)
+ self.assertEqual({'command': ['shutdown', {'pid': 42}]},
+ init.cc_session.msg)
+
+# Mock class for testing Init's usage of ProcessInfo
+class MockProcessInfo:
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.process = None
+ self.pid = None
+
+ def spawn(self):
+ # set some pid (only used for testing that it is not None anymore)
+ self.pid = 42147
+
+# Class for testing the Init without actually starting processes.
+# This is used for testing the start/stop components routines and
+# the Init commands.
+#
+# Testing that external processes start is outside the scope
+# of the unit test, by overriding the process start methods we can check
+# that the right processes are started depending on the configuration
+# options.
+class MockInit(Init):
+ def __init__(self):
+ Init.__init__(self)
+
+ # Set flags as to which of the overridden methods has been run.
+ self.msgq = False
+ self.cfgmgr = False
+ self.ccsession = False
+ self.auth = False
+ self.resolver = False
+ self.xfrout = False
+ self.xfrin = False
+ self.zonemgr = False
+ self.stats = False
+ self.stats_httpd = False
+ self.cmdctl = False
+ self.dhcp6 = False
+ self.dhcp4 = False
+ self.c_channel_env = {}
+ self.components = { }
+ self.creator = False
+ self.get_process_exit_status_called = False
+
+ class MockSockCreator(isc.bind10.component.Component):
+ def __init__(self, process, b10_init, kind, address=None,
+ params=None):
+ isc.bind10.component.Component.__init__(self, process,
+ b10_init, kind,
+ 'SockCreator')
+ self._start_func = b10_init.start_creator
+
+ specials = isc.bind10.special_component.get_specials()
+ specials['sockcreator'] = MockSockCreator
+ self._component_configurator = \
+ isc.bind10.component.Configurator(self, specials)
+
+ def start_creator(self):
+ self.creator = True
+ procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
+ procinfo.pid = 1
+ return procinfo
+
+ def _read_bind10_config(self):
+ # Configuration options are set directly
+ pass
+
+ def start_msgq(self):
+ self.msgq = True
+ procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
+ procinfo.pid = 2
+ return procinfo
+
+ def start_ccsession(self, c_channel_env):
+ # this is not a process, don't have to do anything with procinfo
+ self.ccsession = True
+
+ def start_cfgmgr(self):
+ self.cfgmgr = True
+ procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
+ procinfo.pid = 3
+ return procinfo
+
+ def start_auth(self):
+ self.auth = True
+ procinfo = ProcessInfo('b10-auth', ['/bin/false'])
+ procinfo.pid = 5
+ return procinfo
+
+ def start_resolver(self):
+ self.resolver = True
+ procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
+ procinfo.pid = 6
+ return procinfo
+
+ def start_simple(self, name):
+ procmap = { 'b10-zonemgr': self.start_zonemgr,
+ 'b10-stats': self.start_stats,
+ 'b10-stats-httpd': self.start_stats_httpd,
+ 'b10-cmdctl': self.start_cmdctl,
+ 'b10-dhcp6': self.start_dhcp6,
+ 'b10-dhcp4': self.start_dhcp4,
+ 'b10-xfrin': self.start_xfrin,
+ 'b10-xfrout': self.start_xfrout }
+ return procmap[name]()
+
+ def start_xfrout(self):
+ self.xfrout = True
+ procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
+ procinfo.pid = 7
+ return procinfo
+
+ def start_xfrin(self):
+ self.xfrin = True
+ procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
+ procinfo.pid = 8
+ return procinfo
+
+ def start_zonemgr(self):
+ self.zonemgr = True
+ procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
+ procinfo.pid = 9
+ return procinfo
+
+ def start_stats(self):
+ self.stats = True
+ procinfo = ProcessInfo('b10-stats', ['/bin/false'])
+ procinfo.pid = 10
+ return procinfo
+
+ def start_stats_httpd(self):
+ self.stats_httpd = True
+ procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
+ procinfo.pid = 11
+ return procinfo
+
+ def start_cmdctl(self):
+ self.cmdctl = True
+ procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
+ procinfo.pid = 12
+ return procinfo
+
+ def start_dhcp6(self):
+ self.dhcp6 = True
+ procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ procinfo.pid = 13
+ return procinfo
+
+ def start_dhcp4(self):
+ self.dhcp4 = True
+ procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ procinfo.pid = 14
+ return procinfo
+
+ def stop_process(self, process, recipient, pid):
+ procmap = { 'b10-auth': self.stop_auth,
+ 'b10-resolver': self.stop_resolver,
+ 'b10-xfrout': self.stop_xfrout,
+ 'b10-xfrin': self.stop_xfrin,
+ 'b10-zonemgr': self.stop_zonemgr,
+ 'b10-stats': self.stop_stats,
+ 'b10-stats-httpd': self.stop_stats_httpd,
+ 'b10-cmdctl': self.stop_cmdctl }
+ procmap[process]()
+
+ # Some functions to pretend we stop processes, use by stop_process
+ def stop_msgq(self):
+ if self.msgq:
+ del self.components[2]
+ self.msgq = False
+
+ def stop_cfgmgr(self):
+ if self.cfgmgr:
+ del self.components[3]
+ self.cfgmgr = False
+
+ def stop_auth(self):
+ if self.auth:
+ del self.components[5]
+ self.auth = False
+
+ def stop_resolver(self):
+ if self.resolver:
+ del self.components[6]
+ self.resolver = False
+
+ def stop_xfrout(self):
+ if self.xfrout:
+ del self.components[7]
+ self.xfrout = False
+
+ def stop_xfrin(self):
+ if self.xfrin:
+ del self.components[8]
+ self.xfrin = False
+
+ def stop_zonemgr(self):
+ if self.zonemgr:
+ del self.components[9]
+ self.zonemgr = False
+
+ def stop_stats(self):
+ if self.stats:
+ del self.components[10]
+ self.stats = False
+
+ def stop_stats_httpd(self):
+ if self.stats_httpd:
+ del self.components[11]
+ self.stats_httpd = False
+
+ def stop_cmdctl(self):
+ if self.cmdctl:
+ del self.components[12]
+ self.cmdctl = False
+
+ def _get_process_exit_status(self):
+ if self.get_process_exit_status_called:
+ return (0, 0)
+ self.get_process_exit_status_called = True
+ return (53, 0)
+
+ def _get_process_exit_status_unknown_pid(self):
+ if self.get_process_exit_status_called:
+ return (0, 0)
+ self.get_process_exit_status_called = True
+ return (42, 0)
+
+ def _get_process_exit_status_raises_oserror_echild(self):
+ raise OSError(errno.ECHILD, 'Mock error')
+
+ def _get_process_exit_status_raises_oserror_other(self):
+ raise OSError(0, 'Mock error')
+
+ def _get_process_exit_status_raises_other(self):
+ raise Exception('Mock error')
+
+ def _make_mock_process_info(self, name, args, c_channel_env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ return MockProcessInfo(name, args, c_channel_env,
+ dev_null_stdout, dev_null_stderr)
+
+class MockInitSimple(Init):
+ def __init__(self):
+ Init.__init__(self)
+ # Set which process has been started
+ self.started_process_name = None
+ self.started_process_args = None
+ self.started_process_env = None
+
+ def _make_mock_process_info(self, name, args, c_channel_env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ return MockProcessInfo(name, args, c_channel_env,
+ dev_null_stdout, dev_null_stderr)
+
+ def start_process(self, name, args, c_channel_env, port=None,
+ address=None):
+ self.started_process_name = name
+ self.started_process_args = args
+ self.started_process_env = c_channel_env
+ return None
+
+class TestStartStopProcessesInit(unittest.TestCase):
+ """
+ Check that the start_all_components method starts the right combination
+ of components and that the right components are started and stopped
+ according to changes in configuration.
+ """
+ def check_environment_unchanged(self):
+ # Check whether the environment has not been changed
+ self.assertEqual(original_os_environ, os.environ)
+
+ def check_started(self, init, core, auth, resolver):
+ """
+ Check that the right sets of services are started. The ones that
+ should be running are specified by the core, auth and resolver parameters
+ (they are groups of processes, eg. auth means b10-auth, -xfrout, -xfrin
+ and -zonemgr).
+ """
+ self.assertEqual(init.msgq, core)
+ self.assertEqual(init.cfgmgr, core)
+ self.assertEqual(init.ccsession, core)
+ self.assertEqual(init.creator, core)
+ self.assertEqual(init.auth, auth)
+ self.assertEqual(init.resolver, resolver)
+ self.assertEqual(init.xfrout, auth)
+ self.assertEqual(init.xfrin, auth)
+ self.assertEqual(init.zonemgr, auth)
+ self.assertEqual(init.stats, core)
+ self.assertEqual(init.stats_httpd, core)
+ self.assertEqual(init.cmdctl, core)
+ self.check_environment_unchanged()
+
+ def check_preconditions(self, init):
+ self.check_started(init, False, False, False)
+
+ def check_started_none(self, init):
+ """
+ Check that the situation is according to configuration where no servers
+ should be started. Some components still need to be running.
+ """
+ self.check_started(init, True, False, False)
+ self.check_environment_unchanged()
+
+ def check_started_both(self, init):
+ """
+ Check the situation is according to configuration where both servers
+ (auth and resolver) are enabled.
+ """
+ self.check_started(init, True, True, True)
+ self.check_environment_unchanged()
+
+ def check_started_auth(self, init):
+ """
+ Check the set of components needed to run auth only is started.
+ """
+ self.check_started(init, True, True, False)
+ self.check_environment_unchanged()
+
+ def check_started_resolver(self, init):
+ """
+ Check the set of components needed to run resolver only is started.
+ """
+ self.check_started(init, True, False, True)
+ self.check_environment_unchanged()
+
+ def check_started_dhcp(self, init, v4, v6):
+ """
+ Check if proper combinations of DHCPv4 and DHCpv6 can be started
+ """
+ self.assertEqual(v4, init.dhcp4)
+ self.assertEqual(v6, init.dhcp6)
+ self.check_environment_unchanged()
+
+ def construct_config(self, start_auth, start_resolver):
+ # The things that are common, not turned on an off
+ config = {}
+ config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+ config['b10-stats-httpd'] = { 'kind': 'dispensable',
+ 'address': 'StatsHttpd' }
+ config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+ if start_auth:
+ config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+ config['b10-xfrout'] = { 'kind': 'dispensable',
+ 'address': 'Xfrout' }
+ config['b10-xfrin'] = { 'kind': 'dispensable',
+ 'address': 'Xfrin' }
+ config['b10-zonemgr'] = { 'kind': 'dispensable',
+ 'address': 'Zonemgr' }
+ if start_resolver:
+ config['b10-resolver'] = { 'kind': 'needed',
+ 'special': 'resolver' }
+ return {'components': config}
+
+ def config_start_init(self, start_auth, start_resolver):
+ """
+ Test the configuration is loaded at the startup.
+ """
+ init = MockInit()
+ config = self.construct_config(start_auth, start_resolver)
+ class CC:
+ def get_full_config(self):
+ return config
+ # Provide the fake CC with data
+ init.ccs = CC()
+ # And make sure it's not overwritten
+ def start_ccsession():
+ init.ccsession = True
+ init.start_ccsession = lambda _: start_ccsession()
+ # We need to return the original _read_bind10_config
+ init._read_bind10_config = lambda: Init._read_bind10_config(init)
+ init.start_all_components()
+ self.check_started(init, True, start_auth, start_resolver)
+ self.check_environment_unchanged()
+
+ def test_start_none(self):
+ self.config_start_init(False, False)
+
+ def test_start_resolver(self):
+ self.config_start_init(False, True)
+
+ def test_start_auth(self):
+ self.config_start_init(True, False)
+
+ def test_start_both(self):
+ self.config_start_init(True, True)
+
+ def test_config_start(self):
+ """
+ Test that the configuration starts and stops components according
+ to configuration changes.
+ """
+
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+
+ init.start_all_components()
+ init.runnable = True
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Enable both at once
+ init.config_handler(self.construct_config(True, True))
+ self.check_started_both(init)
+
+ # Not touched by empty change
+ init.config_handler({})
+ self.check_started_both(init)
+
+ # Not touched by change to the same configuration
+ init.config_handler(self.construct_config(True, True))
+ self.check_started_both(init)
+
+ # Turn them both off again
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Not touched by empty change
+ init.config_handler({})
+ self.check_started_none(init)
+
+ # Not touched by change to the same configuration
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Start and stop auth separately
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Start and stop resolver separately
+ init.config_handler(self.construct_config(False, True))
+ self.check_started_resolver(init)
+
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Alternate
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
+ init.config_handler(self.construct_config(False, True))
+ self.check_started_resolver(init)
+
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
    def test_config_start_once(self):
        """
        Tests that a component is started only once.
        """
        # Create Init and ensure correct initialization
        init = MockInit()
        self.check_preconditions(init)

        init.start_all_components()

        init.runnable = True
        init.config_handler(self.construct_config(True, True))
        self.check_started_both(init)

        # Replace the start methods so any further start attempt fails
        # the test immediately.
        init.start_auth = lambda: self.fail("Started auth again")
        init.start_xfrout = lambda: self.fail("Started xfrout again")
        init.start_xfrin = lambda: self.fail("Started xfrin again")
        init.start_zonemgr = lambda: self.fail("Started zonemgr again")
        init.start_resolver = lambda: self.fail("Started resolver again")

        # Send again we want to start them. Should not do it, as they are.
        init.config_handler(self.construct_config(True, True))
+
    def test_config_not_started_early(self):
        """
        Test that components are not started by the config handler before
        startup.
        """
        init = MockInit()
        self.check_preconditions(init)

        # Any start attempt before startup() fails the test.
        init.start_auth = lambda: self.fail("Started auth again")
        init.start_xfrout = lambda: self.fail("Started xfrout again")
        init.start_xfrin = lambda: self.fail("Started xfrin again")
        init.start_zonemgr = lambda: self.fail("Started zonemgr again")
        init.start_resolver = lambda: self.fail("Started resolver again")

        init.config_handler({'start_auth': True, 'start_resolver': True})
+
    # Checks that DHCP (v4 and v6) components are started when expected
    def test_start_dhcp(self):

        # Create Init and ensure correct initialization
        init = MockInit()
        self.check_preconditions(init)

        init.start_all_components()
        init.config_handler(self.construct_config(False, False))
        # Neither DHCPv4 nor DHCPv6 configured -> neither started.
        self.check_started_dhcp(init, False, False)
+
+ def test_start_dhcp_v6only(self):
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+ # v6 only enabled
+ init.start_all_components()
+ init.runnable = True
+ init._Init_started = True
+ config = self.construct_config(False, False)
+ config['components']['b10-dhcp6'] = { 'kind': 'needed',
+ 'address': 'Dhcp6' }
+ init.config_handler(config)
+ self.check_started_dhcp(init, False, True)
+
+ # uncomment when dhcpv4 becomes implemented
+ # v4 only enabled
+ #init.cfg_start_dhcp6 = False
+ #init.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(init, True, False)
+
+ # both v4 and v6 enabled
+ #init.cfg_start_dhcp6 = True
+ #init.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(init, True, True)
+
class MockComponent:
    """A minimal stand-in for a b10-init component object.

    Mirrors the real component interface: ``name``/``pid``/``address``
    are zero-argument callables, while plain attributes record how the
    component was driven (restarted, killed) and what canned state the
    query methods should report.
    """

    def __init__(self, name, pid, address=None):
        # The real API exposes these as callables, not plain fields.
        self.name = lambda: name
        self.pid = lambda: pid
        self.address = lambda: address
        # Flags the tests inspect afterwards.
        self.restarted = False   # flipped by restart()
        self.forceful = False    # last 'forceful' flag passed to kill()
        self.running = True      # canned answer for is_running()
        self.has_failed = False  # canned answer for failed()

    def get_restart_time(self):
        """Return a dummy restart time; the value itself is irrelevant."""
        return 0

    def restart(self, now):
        """Record the restart request and always report success."""
        self.restarted = True
        return True

    def is_running(self):
        """Report the canned running state."""
        return self.running

    def failed(self, status):
        """Report the canned failure state; the exit status is ignored."""
        return self.has_failed

    def kill(self, forceful):
        """Remember whether the kill was forceful (SIGKILL vs SIGTERM)."""
        self.forceful = forceful
+
class TestInitCmd(unittest.TestCase):
    """Tests for the b10-init external command handler."""

    def test_ping(self):
        """A "ping" command is answered with a successful "pong"."""
        b10_init = MockInit()
        self.assertEqual({'result': [0, 'pong']},
                         b10_init.command_handler("ping", None))

    def test_show_processes_empty(self):
        """"show_processes" yields an empty list when nothing is registered."""
        b10_init = MockInit()
        self.assertEqual({'result': [0, []]},
                         b10_init.command_handler("show_processes", None))

    def test_show_processes(self):
        """"show_processes" lists each registered process as [pid, name, address]."""
        b10_init = MockInit()
        b10_init.register_process(1, MockComponent('first', 1))
        b10_init.register_process(2, MockComponent('second', 2, 'Second'))
        expected = [[1, 'first', None],
                    [2, 'second', 'Second']]
        self.assertEqual({'result': [0, expected]},
                         b10_init.command_handler("show_processes", None))
+
class TestParseArgs(unittest.TestCase):
    """
    This tests parsing of arguments of the bind10 master process.
    """
    #TODO: Write tests for the original parsing, bad options, etc.
    def test_no_opts(self):
        """
        Test correct default values when no options are passed.
        """
        options = parse_args([], TestOptParser)
        self.assertEqual(None, options.data_path)
        self.assertEqual(None, options.config_file)
        self.assertEqual(None, options.cmdctl_port)

    def test_data_path(self):
        """
        Test it can parse the data path.
        """
        # A flag without its required argument must raise.
        self.assertRaises(OptsError, parse_args, ['-p'], TestOptParser)
        self.assertRaises(OptsError, parse_args, ['--data-path'],
                          TestOptParser)
        # Both the short and the long form are accepted.
        options = parse_args(['-p', '/data/path'], TestOptParser)
        self.assertEqual('/data/path', options.data_path)
        options = parse_args(['--data-path=/data/path'], TestOptParser)
        self.assertEqual('/data/path', options.data_path)

    def test_config_filename(self):
        """
        Test it can parse the config switch.
        """
        self.assertRaises(OptsError, parse_args, ['-c'], TestOptParser)
        self.assertRaises(OptsError, parse_args, ['--config-file'],
                          TestOptParser)
        options = parse_args(['-c', 'config-file'], TestOptParser)
        self.assertEqual('config-file', options.config_file)
        options = parse_args(['--config-file=config-file'], TestOptParser)
        self.assertEqual('config-file', options.config_file)

    def test_clear_config(self):
        # --clear-config is a boolean flag defaulting to False.
        options = parse_args([], TestOptParser)
        self.assertEqual(False, options.clear_config)
        options = parse_args(['--clear-config'], TestOptParser)
        self.assertEqual(True, options.clear_config)

    def test_nokill(self):
        # --no-kill / -i is a boolean flag defaulting to False.
        options = parse_args([], TestOptParser)
        self.assertEqual(False, options.nokill)
        options = parse_args(['--no-kill'], TestOptParser)
        self.assertEqual(True, options.nokill)
        options = parse_args([], TestOptParser)
        self.assertEqual(False, options.nokill)
        options = parse_args(['-i'], TestOptParser)
        self.assertEqual(True, options.nokill)

    def test_cmdctl_port(self):
        """
        Test it can parse the command control port.
        """
        # Non-numeric, out-of-range and missing values must raise.
        self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
                          TestOptParser)
        self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
                          TestOptParser)
        self.assertRaises(OptsError, parse_args, ['--cmdctl-port'],
                          TestOptParser)
        options = parse_args(['--cmdctl-port=1234'], TestOptParser)
        self.assertEqual(1234, options.cmdctl_port)
+
class TestPIDFile(unittest.TestCase):
    """Tests for dump_pid() and unlink_pid_file()."""
    def setUp(self):
        # '@builddir@' is substituted at build time (this is a .py.in file).
        self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
        if os.path.exists(self.pid_file):
            os.unlink(self.pid_file)

    def tearDown(self):
        # Remove any leftover PID file so tests stay independent.
        if os.path.exists(self.pid_file):
            os.unlink(self.pid_file)

    def check_pid_file(self):
        # dump PID to the file, and confirm the content is correct
        dump_pid(self.pid_file)
        my_pid = os.getpid()
        with open(self.pid_file, "r") as f:
            self.assertEqual(my_pid, int(f.read()))

    def test_dump_pid(self):
        self.check_pid_file()

        # make sure any existing content will be removed
        with open(self.pid_file, "w") as f:
            f.write('dummy data\n')
        self.check_pid_file()

    def test_unlink_pid_file_notexist(self):
        dummy_data = 'dummy_data\n'

        with open(self.pid_file, "w") as f:
            f.write(dummy_data)

        unlink_pid_file("no_such_pid_file")

        # the file specified for unlink_pid_file doesn't exist,
        # and the original content of the file should be intact.
        with open(self.pid_file, "r") as f:
            self.assertEqual(dummy_data, f.read())

    def test_dump_pid_with_none(self):
        # Check the behavior of dump_pid() and unlink_pid_file() with None.
        # This should be no-op.
        dump_pid(None)
        self.assertFalse(os.path.exists(self.pid_file))

        dummy_data = 'dummy_data\n'

        with open(self.pid_file, "w") as f:
            f.write(dummy_data)

        unlink_pid_file(None)

        with open(self.pid_file, "r") as f:
            self.assertEqual(dummy_data, f.read())

    def test_dump_pid_failure(self):
        # the attempt to open file will fail, which should result in exception.
        self.assertRaises(IOError, dump_pid,
                          'nonexistent_dir' + os.sep + 'bind10.pid')
+
+class TestInitComponents(unittest.TestCase):
+ """
+ Test b10-init propagates component configuration properly to the
+ component configurator and acts sane.
+ """
    def setUp(self):
        # Scratch slots the hook methods write to, inspected by the tests.
        self.__param = None
        self.__called = False
        # A minimal extra-component configuration reused by several tests.
        self.__compconfig = {
            'comp': {
                'kind': 'needed',
                'process': 'cat'
            }
        }
        # Saved originals of globally monkey-patched callables; tearDown()
        # restores whichever a test replaced.
        self._tmp_time = None
        self._tmp_sleep = None
        self._tmp_module_cc_session = None
        self._tmp_cc_session = None
+
    def tearDown(self):
        # Undo any global monkey-patching a test performed.
        if self._tmp_time is not None:
            time.time = self._tmp_time
        if self._tmp_sleep is not None:
            time.sleep = self._tmp_sleep
        if self._tmp_module_cc_session is not None:
            isc.config.ModuleCCSession = self._tmp_module_cc_session
        if self._tmp_cc_session is not None:
            isc.cc.Session = self._tmp_cc_session
+
    def __unary_hook(self, param):
        """
        A hook function that stores the parameter for later examination.
        Used to replace one-argument methods such as configurator startup.
        """
        self.__param = param
+
    def __nullary_hook(self):
        """
        A hook function that notes down it was called.
        Used to replace zero-argument methods such as configurator shutdown.
        """
        self.__called = True
+
    def __check_core(self, config):
        """
        A function checking that the config contains parts for the valid
        core component configuration.
        """
        self.assertIsNotNone(config)
        # Every core component must be present, marked 'special' with its
        # own name and have kind 'core'.
        for component in ['sockcreator', 'msgq', 'cfgmgr']:
            self.assertTrue(component in config)
            self.assertEqual(component, config[component]['special'])
            self.assertEqual('core', config[component]['kind'])
+
    def __check_extended(self, config):
        """
        This checks that the config contains the core and one more component
        (the 'comp' entry from self.__compconfig) and nothing else.
        """
        self.__check_core(config)
        self.assertTrue('comp' in config)
        self.assertEqual('cat', config['comp']['process'])
        self.assertEqual('needed', config['comp']['kind'])
        # 3 core components + 'comp'
        self.assertEqual(4, len(config))
+
    def test_correct_run(self):
        """
        Test the situation when we run in usual scenario, nothing fails,
        we just start, reconfigure and then stop peacefully.
        """
        init = MockInit()
        # Start it; intercept the configurator startup to capture the
        # configuration it is handed.
        orig = init._component_configurator.startup
        init._component_configurator.startup = self.__unary_hook
        init.start_all_components()
        init._component_configurator.startup = orig
        self.__check_core(self.__param)
        self.assertEqual(3, len(self.__param))

        # Reconfigure it
        self.__param = None
        orig = init._component_configurator.reconfigure
        init._component_configurator.reconfigure = self.__unary_hook
        # Otherwise it does not work
        init.runnable = True
        init.config_handler({'components': self.__compconfig})
        self.__check_extended(self.__param)
        currconfig = self.__param
        # If we reconfigure it, but it does not contain the components part,
        # nothing is called
        init.config_handler({})
        self.assertEqual(self.__param, currconfig)
        self.__param = None
        init._component_configurator.reconfigure = orig
        # Check a configuration that messes up the core components is rejected.
        compconf = dict(self.__compconfig)
        compconf['msgq'] = { 'process': 'echo' }
        result = init.config_handler({'components': compconf})
        # Check it rejected it (answer code 1 == error)
        self.assertEqual(1, result['result'][0])

        # We can't call shutdown, that one relies on the stuff in main
        # We check somewhere else that the shutdown is actually called
        # from there (the test_kills).
+
    def __real_test_kill(self, nokill=False, ex_on_kill=None):
        """
        Helper function that does the actual kill functionality testing.

        nokill: if True, b10-init is told never to kill components.
        ex_on_kill: optional OSError instance to be raised by the
            component's kill() to simulate OS-level failures.
        """
        init = MockInit()
        init.nokill = nokill

        killed = []
        class ImmortalComponent:
            """
            An immortal component. It does not stop when it is told so
            (anyway it is not told so). It does not die if it is killed
            the first time. It dies only when killed forcefully.
            """
            def __init__(self):
                # number of kill() calls, preventing infinite loop.
                self.__call_count = 0

            def kill(self, forceful=False):
                self.__call_count += 1
                if self.__call_count > 2:
                    raise Exception('Too many calls to ImmortalComponent.kill')

                killed.append(forceful)
                if ex_on_kill is not None:
                    # If exception is given by the test, raise it here.
                    # In the case of ESRCH, the process should have gone
                    # somehow, so we clear the components.
                    if ex_on_kill.errno == errno.ESRCH:
                        init.components = {}
                    raise ex_on_kill
                if forceful:
                    init.components = {}
            def pid(self):
                return 1
            def name(self):
                return "Immortal"
        init.components = {}
        init.register_process(1, ImmortalComponent())

        # While at it, we check the configurator shutdown is actually called
        orig = init._component_configurator.shutdown
        init._component_configurator.shutdown = self.__nullary_hook
        self.__called = False

        init.ccs = MockModuleCCSession()
        self.assertFalse(init.ccs.stopped)

        init.shutdown()

        self.assertTrue(init.ccs.stopped)

        # Here, killed is an array where False is added if SIGTERM
        # should be sent, or True if SIGKILL should be sent, in order in
        # which they're sent.
        if nokill:
            self.assertEqual([], killed)
        else:
            if ex_on_kill is not None:
                self.assertEqual([False], killed)
            else:
                self.assertEqual([False, True], killed)

        self.assertTrue(self.__called)

        init._component_configurator.shutdown = orig
+
    def test_kills(self):
        """
        Test that b10-init kills components which don't want to stop.
        """
        self.__real_test_kill()
+
    def test_kill_fail(self):
        """Test cases where kill() results in an exception due to OS error.

        The behavior should be different for EPERM, so we test two cases.

        """

        # ESRCH: the process no longer exists.
        ex = OSError()
        ex.errno, ex.strerror = errno.ESRCH, 'No such process'
        self.__real_test_kill(ex_on_kill=ex)

        # EPERM: we lack permission to signal the process.
        ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
        self.__real_test_kill(ex_on_kill=ex)
+
    def test_nokill(self):
        """
        Test that b10-init *doesn't* kill components which don't want to
        stop, when asked not to (by passing the --no-kill option which
        sets init.nokill to True).
        """
        self.__real_test_kill(True)
+
    def test_component_shutdown(self):
        """
        Test the component_shutdown sets all variables accordingly.
        """
        init = MockInit()
        # Before startup completes, component_shutdown() raises but still
        # records the exit code.
        self.assertRaises(Exception, init.component_shutdown, 1)
        self.assertEqual(1, init.exitcode)
        # Mark init as started ('__started' name-mangled on class Init).
        init._Init__started = True
        init.component_shutdown(2)
        self.assertEqual(2, init.exitcode)
        self.assertFalse(init.runnable)
+
    def test_init_config(self):
        """
        Test initial configuration is loaded.
        """
        init = MockInit()
        # Start it; capture what the configurator is asked to apply.
        init._component_configurator.reconfigure = self.__unary_hook
        # We need to return the original read_bind10_config
        init._read_bind10_config = lambda: Init._read_bind10_config(init)
        # And provide a session to read the data from
        class CC:
            pass
        init.ccs = CC()
        init.ccs.get_full_config = lambda: {'components': self.__compconfig}
        init.start_all_components()
        self.__check_extended(self.__param)
+
    def __setup_restart(self, init, component):
        '''Common procedure for restarting a component used below.'''
        # Schedule exactly this component for restart, then run the
        # restart pass.
        init.components_to_restart = { component }
        component.restarted = False
        init.restart_processes()
+
    def test_restart_processes(self):
        '''Check some behavior on restarting processes.'''
        init = MockInit()
        init.runnable = True
        component = MockComponent('test', 53)

        # A component to be restarted will actually be restarted iff it's
        # in the configurator's configuration.
        # We bruteforce the configurator internal below; ugly, but the easiest
        # way for the test.
        init._component_configurator._components['test'] = (None, component)
        self.__setup_restart(init, component)
        self.assertTrue(component.restarted)
        self.assertNotIn(component, init.components_to_restart)

        # Remove the component from the configuration. It won't be restarted
        # even if scheduled, nor will remain in the to-be-restarted list.
        del init._component_configurator._components['test']
        self.__setup_restart(init, component)
        self.assertFalse(component.restarted)
        self.assertNotIn(component, init.components_to_restart)
+
    def test_get_processes(self):
        '''Test that processes are returned correctly, sorted by pid.'''
        init = MockInit()

        # Register 20 components under shuffled pids to exercise sorting.
        pids = list(range(0, 20))
        random.shuffle(pids)

        for i in range(0, 20):
            pid = pids[i]
            component = MockComponent('test' + str(pid), pid,
                                      'Test' + str(pid))
            init.components[pid] = component

        process_list = init.get_processes()
        self.assertEqual(20, len(process_list))

        # Verify ascending pid order and the [pid, name, address] shape.
        last_pid = -1
        for process in process_list:
            pid = process[0]
            self.assertLessEqual(last_pid, pid)
            last_pid = pid
            self.assertEqual([pid, 'test' + str(pid), 'Test' + str(pid)],
                             process)
+
    def _test_reap_children_helper(self, runnable, is_running, failed):
        '''Construct a Init instance, set various data in it according to
        passed args and check if the component was added to the list of
        components to restart.'''
        init = MockInit()
        init.runnable = runnable

        component = MockComponent('test', 53)
        component.running = is_running
        component.has_failed = failed
        init.components[53] = component

        self.assertNotIn(component, init.components_to_restart)

        init.reap_children()

        # The component is queued for restart only while init is runnable,
        # the component reports running and did not fail.
        if runnable and is_running and not failed:
            self.assertIn(component, init.components_to_restart)
        else:
            self.assertEqual([], init.components_to_restart)
+
    def test_reap_children(self):
        '''Test that children are queued to be restarted when they ask for it.'''
        # test various combinations of 3 booleans
        # (Init.runnable, component.is_running(), component.failed())
        self._test_reap_children_helper(False, False, False)
        self._test_reap_children_helper(False, False, True)
        self._test_reap_children_helper(False, True,  False)
        self._test_reap_children_helper(False, True,  True)
        self._test_reap_children_helper(True,  False, False)
        self._test_reap_children_helper(True,  False, True)
        self._test_reap_children_helper(True,  True,  False)
        self._test_reap_children_helper(True,  True,  True)

        # setup for more tests below
        init = MockInit()
        init.runnable = True
        component = MockComponent('test', 53)
        init.components[53] = component

        # case where the returned pid is unknown to us. nothing should
        # happen then.
        init.get_process_exit_status_called = False
        init._get_process_exit_status = init._get_process_exit_status_unknown_pid
        init.components_to_restart = []
        # this should do nothing as the pid is unknown
        init.reap_children()
        self.assertEqual([], init.components_to_restart)

        # case where init._get_process_exit_status() raises OSError with
        # errno.ECHILD
        init._get_process_exit_status = \
            init._get_process_exit_status_raises_oserror_echild
        init.components_to_restart = []
        # this should catch and handle the OSError
        init.reap_children()
        self.assertEqual([], init.components_to_restart)

        # case where init._get_process_exit_status() raises OSError with
        # errno other than ECHILD
        init._get_process_exit_status = \
            init._get_process_exit_status_raises_oserror_other
        with self.assertRaises(OSError):
            init.reap_children()

        # case where init._get_process_exit_status() raises something
        # other than OSError
        init._get_process_exit_status = \
            init._get_process_exit_status_raises_other
        with self.assertRaises(Exception):
            init.reap_children()
+
    def test_kill_started_components(self):
        '''Test that started components are killed.'''
        init = MockInit()

        component = MockComponent('test', 53, 'Test')
        init.components[53] = component

        self.assertEqual([[53, 'test', 'Test']], init.get_processes())
        init.kill_started_components()
        # The component list is emptied and the kill was forceful.
        self.assertEqual([], init.get_processes())
        self.assertTrue(component.forceful)
+
    def _start_msgq_helper(self, init, verbose):
        # Start b10-msgq under the given verbosity and check the resulting
        # (mock) process info.
        init.verbose = verbose
        pi = init.start_msgq()
        self.assertEqual('b10-msgq', pi.name)
        self.assertEqual(['b10-msgq'], pi.args)
        self.assertTrue(pi.dev_null_stdout)
        # stderr is silenced only in non-verbose mode.
        self.assertEqual(pi.dev_null_stderr, not verbose)
        self.assertEqual({'FOO': 'an env string'}, pi.env)

        # this is set by ProcessInfo.spawn()
        self.assertEqual(42147, pi.pid)
+
    def test_start_msgq(self):
        '''Test that b10-msgq is started.'''
        init = MockInitSimple()
        init.c_channel_env = {'FOO': 'an env string'}
        init._run_under_unittests = True

        # use the MockProcessInfo creator
        init._make_process_info = init._make_mock_process_info

        # non-verbose case
        self._start_msgq_helper(init, False)

        # verbose case
        self._start_msgq_helper(init, True)
+
    def test_start_msgq_timeout(self):
        '''Test that b10-msgq startup attempts connections several times
        and times out eventually.'''
        # NOTE: the local is named b10_init so that the bare name 'init'
        # below still refers to the imported b10-init module
        # (init.CChannelConnectError).
        b10_init = MockInitSimple()
        b10_init.c_channel_env = {}
        # set the timeout to an arbitrary pre-determined value (which
        # code below depends on)
        b10_init.msgq_timeout = 1
        b10_init._run_under_unittests = False

        # use the MockProcessInfo creator
        b10_init._make_process_info = b10_init._make_mock_process_info

        # Fake time: time.time() counts calls, time.sleep() advances the
        # simulated clock instead of really sleeping.
        global attempts
        global tsec
        attempts = 0
        tsec = 0
        self._tmp_time = time.time
        self._tmp_sleep = time.sleep
        def _my_time():
            global attempts
            global tsec
            attempts += 1
            return tsec
        def _my_sleep(nsec):
            global tsec
            tsec += nsec
        time.time = _my_time
        time.sleep = _my_sleep

        global cc_sub
        cc_sub = None
        class DummySessionAlwaysFails():
            def __init__(self, socket_file):
                raise isc.cc.session.SessionError('Connection fails')
            def group_subscribe(self, s):
                global cc_sub
                cc_sub = s

        isc.cc.Session = DummySessionAlwaysFails

        with self.assertRaises(init.CChannelConnectError):
            # An exception will be thrown here when it eventually times
            # out.
            pi = b10_init.start_msgq()

        # time.time() should be called 12 times within the while loop:
        # starting from 0, and 11 more times from 0.1 to 1.1. There's
        # another call to time.time() outside the loop, which makes it
        # 13.
        self.assertEqual(attempts, 13)

        # group_subscribe() should not have been called here.
        self.assertIsNone(cc_sub)

        # Now with a session that connects right away.
        global cc_socket_file
        cc_socket_file = None
        cc_sub = None
        class DummySession():
            def __init__(self, socket_file):
                global cc_socket_file
                cc_socket_file = socket_file
            def group_subscribe(self, s):
                global cc_sub
                cc_sub = s

        isc.cc.Session = DummySession

        # reset values
        attempts = 0
        tsec = 0

        pi = b10_init.start_msgq()

        # just one attempt, but 2 calls to time.time()
        self.assertEqual(attempts, 2)

        self.assertEqual(cc_socket_file, b10_init.msgq_socket_file)
        self.assertEqual(cc_sub, 'Init')

        # isc.cc.Session, time.time() and time.sleep() are restored
        # during tearDown().
+
    def _start_cfgmgr_helper(self, init, data_path, filename, clear_config):
        # Build the expected command line from the given options; each
        # option, once set on init, persists across calls.
        expect_args = ['b10-cfgmgr']
        if data_path is not None:
            init.data_path = data_path
            expect_args.append('--data-path=' + data_path)
        if filename is not None:
            init.config_filename = filename
            expect_args.append('--config-filename=' + filename)
        if clear_config:
            init.clear_config = clear_config
            expect_args.append('--clear-config')

        pi = init.start_cfgmgr()
        self.assertEqual('b10-cfgmgr', pi.name)
        self.assertEqual(expect_args, pi.args)
        self.assertEqual({'TESTENV': 'A test string'}, pi.env)

        # this is set by ProcessInfo.spawn()
        self.assertEqual(42147, pi.pid)
+
    def test_start_cfgmgr(self):
        '''Test that b10-cfgmgr is started.'''
        class DummySession():
            def __init__(self):
                self._tries = 0
            def group_recvmsg(self):
                self._tries += 1
                # return running on the 3rd try onwards
                if self._tries >= 3:
                    return ({'running': 'ConfigManager'}, None)
                else:
                    return ({}, None)

        init = MockInitSimple()
        init.c_channel_env = {'TESTENV': 'A test string'}
        init.cc_session = DummySession()
        init.wait_time = 5

        # use the MockProcessInfo creator
        init._make_process_info = init._make_mock_process_info

        # Count sleeps instead of really sleeping.
        global attempts
        attempts = 0
        self._tmp_sleep = time.sleep
        def _my_sleep(nsec):
            global attempts
            attempts += 1
        time.sleep = _my_sleep

        # defaults
        self._start_cfgmgr_helper(init, None, None, False)

        # check that 2 attempts were made. on the 3rd attempt,
        # process_running() returns that ConfigManager is running.
        self.assertEqual(attempts, 2)

        # data_path is specified
        self._start_cfgmgr_helper(init, '/var/lib/test', None, False)

        # config_filename is specified. Because `init` is not
        # reconstructed, data_path is retained from the last call to
        # _start_cfgmgr_helper().
        self._start_cfgmgr_helper(init, '/var/lib/test', 'foo.cfg', False)

        # clear_config is specified. Because `init` is not reconstructed,
        # data_path and config_filename are retained from the last call
        # to _start_cfgmgr_helper().
        self._start_cfgmgr_helper(init, '/var/lib/test', 'foo.cfg', True)
+
    def test_start_cfgmgr_timeout(self):
        '''Test that b10-cfgmgr startup attempts connections several times
        and times out eventually.'''
        class DummySession():
            # Never reports the config manager as running.
            def group_recvmsg(self):
                return (None, None)
        b10_init = MockInitSimple()
        b10_init.c_channel_env = {}
        b10_init.cc_session = DummySession()
        # set wait_time to an arbitrary pre-determined value (which code
        # below depends on)
        b10_init.wait_time = 2

        # use the MockProcessInfo creator
        b10_init._make_process_info = b10_init._make_mock_process_info

        # Count sleeps instead of really sleeping.
        global attempts
        attempts = 0
        self._tmp_sleep = time.sleep
        def _my_sleep(nsec):
            global attempts
            attempts += 1
        time.sleep = _my_sleep

        # We just check that an exception was thrown, and that several
        # attempts were made to connect.
        with self.assertRaises(init.ProcessStartError):
            pi = b10_init.start_cfgmgr()

        # 2 seconds of attempts every 1 second should result in 2 attempts
        self.assertEqual(attempts, 2)

        # time.sleep() is restored during tearDown().
+
    def test_start_ccsession(self):
        '''Test that CC session is started.'''
        class DummySession():
            # Records the constructor arguments for later verification.
            def __init__(self, specfile, config_handler, command_handler,
                         socket_file):
                self.specfile = specfile
                self.config_handler = config_handler
                self.command_handler = command_handler
                self.socket_file = socket_file
                self.started = False
            def start(self):
                self.started = True
        b10_init = MockInitSimple()
        self._tmp_module_cc_session = isc.config.ModuleCCSession
        isc.config.ModuleCCSession = DummySession

        b10_init.start_ccsession({})
        # 'init' here is the imported b10-init module.
        self.assertEqual(init.SPECFILE_LOCATION, b10_init.ccs.specfile)
        self.assertEqual(b10_init.config_handler, b10_init.ccs.config_handler)
        self.assertEqual(b10_init.command_handler,
                         b10_init.ccs.command_handler)
        self.assertEqual(b10_init.msgq_socket_file, b10_init.ccs.socket_file)
        self.assertTrue(b10_init.ccs.started)

        # isc.config.ModuleCCSession is restored during tearDown().
+
    def test_start_process(self):
        '''Test that processes can be started.'''
        init = MockInit()

        # use the MockProcessInfo creator
        init._make_process_info = init._make_mock_process_info

        pi = init.start_process('Test Process', ['/bin/true'], {})
        self.assertEqual('Test Process', pi.name)
        self.assertEqual(['/bin/true'], pi.args)
        self.assertEqual({}, pi.env)

        # this is set by ProcessInfo.spawn()
        self.assertEqual(42147, pi.pid)
+
    def test_register_process(self):
        '''Test that processes can be registered with Init.'''
        init = MockInit()
        component = MockComponent('test', 53, 'Test')

        self.assertFalse(53 in init.components)
        init.register_process(53, component)
        # The component is stored under its pid, unchanged.
        self.assertTrue(53 in init.components)
        self.assertEqual(init.components[53].name(), 'test')
        self.assertEqual(init.components[53].pid(), 53)
        self.assertEqual(init.components[53].address(), 'Test')
+
    def _start_simple_helper(self, init, verbose):
        # Start a simple process and check name/args/env; '-v' is
        # appended in verbose mode.
        init.verbose = verbose

        args = ['/bin/true']
        if verbose:
            args.append('-v')

        init.start_simple('/bin/true')
        self.assertEqual('/bin/true', init.started_process_name)
        self.assertEqual(args, init.started_process_args)
        self.assertEqual({'TESTENV': 'A test string'}, init.started_process_env)
+
    def test_start_simple(self):
        '''Test simple process startup.'''
        init = MockInitSimple()
        init.c_channel_env = {'TESTENV': 'A test string'}

        # non-verbose case
        self._start_simple_helper(init, False)

        # verbose case
        self._start_simple_helper(init, True)
+
    def _start_auth_helper(self, init, verbose):
        # Start b10-auth and check name/args/env; '-v' only in verbose mode.
        init.verbose = verbose

        args = ['b10-auth']
        if verbose:
            args.append('-v')

        init.start_auth()
        self.assertEqual('b10-auth', init.started_process_name)
        self.assertEqual(args, init.started_process_args)
        self.assertEqual({'FOO': 'an env string'}, init.started_process_env)
+
    def test_start_auth(self):
        '''Test that b10-auth is started.'''
        init = MockInitSimple()
        init.c_channel_env = {'FOO': 'an env string'}

        # non-verbose case
        self._start_auth_helper(init, False)

        # verbose case
        self._start_auth_helper(init, True)
+
    def _start_resolver_helper(self, init, verbose):
        # Start b10-resolver and check name/args/env; '-v' only in
        # verbose mode.
        init.verbose = verbose

        args = ['b10-resolver']
        if verbose:
            args.append('-v')

        init.start_resolver()
        self.assertEqual('b10-resolver', init.started_process_name)
        self.assertEqual(args, init.started_process_args)
        self.assertEqual({'BAR': 'an env string'}, init.started_process_env)
+
    def test_start_resolver(self):
        '''Test that b10-resolver is started.'''
        init = MockInitSimple()
        init.c_channel_env = {'BAR': 'an env string'}

        # non-verbose case
        self._start_resolver_helper(init, False)

        # verbose case
        self._start_resolver_helper(init, True)
+
    def _start_cmdctl_helper(self, init, verbose, port = None):
        # Start b10-cmdctl and check name/args/env.  NOTE: the expected
        # argument hard-codes port 9353, matching the port the callers
        # below pass in.
        init.verbose = verbose

        args = ['b10-cmdctl']

        if port is not None:
            init.cmdctl_port = port
            args.append('--port=9353')

        if verbose:
            args.append('-v')

        init.start_cmdctl()
        self.assertEqual('b10-cmdctl', init.started_process_name)
        self.assertEqual(args, init.started_process_args)
        self.assertEqual({'BAZ': 'an env string'}, init.started_process_env)
+
    def test_start_cmdctl(self):
        '''Test that b10-cmdctl is started.'''
        init = MockInitSimple()
        init.c_channel_env = {'BAZ': 'an env string'}

        # non-verbose case
        self._start_cmdctl_helper(init, False)

        # verbose case
        self._start_cmdctl_helper(init, True)

        # with port, non-verbose case
        self._start_cmdctl_helper(init, False, 9353)

        # with port, verbose case
        self._start_cmdctl_helper(init, True, 9353)
+
    def test_socket_data(self):
        '''Test that Init._socket_data works as expected.'''
        class MockSock:
            # A fake socket yielding a fixed byte buffer one byte per recv().
            def __init__(self, fd, throw):
                self.fd = fd
                self.throw = throw
                self.buf = b'Hello World.\nYou are so nice today.\nXX'
                self.i = 0

            def recv(self, bufsize, flags = 0):
                if bufsize != 1:
                    raise Exception('bufsize != 1')
                if flags != socket.MSG_DONTWAIT:
                    raise Exception('flags != socket.MSG_DONTWAIT')
                # after 15 recv()s, throw a socket.error with EAGAIN to
                # get _socket_data() to save back what's been read. The
                # number 15 is arbitrarily chosen, but the checks then
                # depend on this being 15, i.e., if you adjust this
                # number, you may have to adjust the checks below too.
                if self.throw and self.i > 15:
                    raise socket.error(errno.EAGAIN, 'Try again')
                if self.i >= len(self.buf):
                    return b'';
                t = self.i
                self.i += 1
                return self.buf[t:t+1]

            def close(self):
                return

        class MockInitSocketData(Init):
            # Bypasses Init.__init__; only provides what _socket_data reads.
            def __init__(self, throw):
                self._unix_sockets = {42: (MockSock(42, throw), b'')}
                self.requests = []
                self.dead = []

            def socket_request_handler(self, previous, sock):
                self.requests.append({sock.fd: previous})

            def socket_consumer_dead(self, sock):
                self.dead.append(sock.fd)

        # Case where we get data every time we call recv()
        init = MockInitSocketData(False)
        init._socket_data(42)
        self.assertEqual(init.requests,
                         [{42: b'Hello World.'},
                          {42: b'You are so nice today.'}])
        self.assertEqual(init.dead, [42])
        self.assertEqual({}, init._unix_sockets)

        # Case where socket.recv() raises EAGAIN. In this case, the
        # routine is supposed to save what it has back to
        # Init._unix_sockets.
        init = MockInitSocketData(True)
        init._socket_data(42)
        self.assertEqual(init.requests, [{42: b'Hello World.'}])
        self.assertFalse(init.dead)
        self.assertEqual(len(init._unix_sockets), 1)
        self.assertEqual(init._unix_sockets[42][1], b'You')
+
+ def test_startup(self):
+ '''Test that Init.startup() handles failures properly.'''
+ class MockInitStartup(Init):
+ def __init__(self, throw):
+ self.throw = throw
+ self.started = False
+ self.killed = False
+ self.msgq_socket_file = None
+ self.curproc = 'myproc'
+ self.runnable = False
+
+ def start_all_components(self):
+ self.started = True
+ if self.throw is True:
+ raise Exception('Assume starting components has failed.')
+ elif self.throw:
+ raise self.throw
+
+ def kill_started_components(self):
+ self.killed = True
+
+ class DummySession():
+ def __init__(self, socket_file):
+ raise isc.cc.session.SessionError('This is the expected case.')
+
+ class DummySessionSocketExists():
+ def __init__(self, socket_file):
+ # simulate that connect passes
+ return
+
+ isc.cc.Session = DummySession
+
+ # All is well case, where all components are started
+ # successfully. We check that the actual call to
+ # start_all_components() is made, and Init.runnable is true.
+ b10_init = MockInitStartup(False)
+ r = b10_init.startup()
+ self.assertIsNone(r)
+ self.assertTrue(b10_init.started)
+ self.assertFalse(b10_init.killed)
+ self.assertTrue(b10_init.runnable)
+ self.assertEqual({}, b10_init.c_channel_env)
+
+ # Case where starting components fails. We check that
+ # kill_started_components() is called right after, and
+ # Init.runnable is not modified.
+ b10_init = MockInitStartup(True)
+ r = b10_init.startup()
+ # r contains an error message
+ self.assertEqual(r, 'Unable to start myproc: Assume starting components has failed.')
+ self.assertTrue(b10_init.started)
+ self.assertTrue(b10_init.killed)
+ self.assertFalse(b10_init.runnable)
+ self.assertEqual({}, b10_init.c_channel_env)
+
+ # Check if msgq_socket_file is carried over
+ b10_init = MockInitStartup(False)
+ b10_init.msgq_socket_file = 'foo'
+ r = b10_init.startup()
+ self.assertEqual({'BIND10_MSGQ_SOCKET_FILE': 'foo'},
+ b10_init.c_channel_env)
+
+ # Check failure of changing user results in a different message
+ b10_init = MockInitStartup(init.ChangeUserError('failed to chusr'))
+ r = b10_init.startup()
+ self.assertIn('failed to chusr', r)
+ self.assertTrue(b10_init.killed)
+
+ # Check the case when socket file already exists
+ isc.cc.Session = DummySessionSocketExists
+ b10_init = MockInitStartup(False)
+ r = b10_init.startup()
+ self.assertIn('already running', r)
+
+ # isc.cc.Session is restored during tearDown().
+
+class SocketSrvTest(unittest.TestCase):
+ """
+ This tests some methods of b10-init related to the unix domain sockets
+ used to transfer other sockets to applications.
+ """
+ def setUp(self):
+ """
+ Create the b10-init to test, testdata and backup some functions.
+ """
+ self.__b10_init = Init()
+ self.__select_backup = init.select.select
+ self.__select_called = None
+ self.__socket_data_called = None
+ self.__consumer_dead_called = None
+ self.__socket_request_handler_called = None
+
+ def tearDown(self):
+ """
+ Restore functions.
+ """
+ init.select.select = self.__select_backup
+
+ class __FalseSocket:
+ """
+ A mock socket for the select and accept and stuff like that.
+ """
+ def __init__(self, owner, fileno=42):
+ self.__owner = owner
+ self.__fileno = fileno
+ self.data = None
+ self.closed = False
+
+ def fileno(self):
+ return self.__fileno
+
+ def accept(self):
+ return (self.__class__(self.__owner, 13), "/path/to/socket")
+
+ def recv(self, bufsize, flags=0):
+ self.__owner.assertEqual(1, bufsize)
+ self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+ if isinstance(self.data, socket.error):
+ raise self.data
+ elif self.data is not None:
+ if len(self.data):
+ result = self.data[0:1]
+ self.data = self.data[1:]
+ return result
+ else:
+ raise socket.error(errno.EAGAIN, "Would block")
+ else:
+ return b''
+
+ def close(self):
+ self.closed = True
+
+ class __CCS:
+ """
+ A mock CCS, just to provide the socket file number.
+ """
+ class __Socket:
+ def fileno(self):
+ return 1
+ def get_socket(self):
+ return self.__Socket()
+
+ def __select_accept(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([42], [], [])
+
+ def __select_data(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([13], [], [])
+
+ def __accept(self):
+ """
+ Hijack the accept method of the b10-init.
+
+ Notes down it was called and stops b10-init.
+ """
+ self.__accept_called = True
+ self.__b10_init.runnable = False
+
+ def test_srv_accept_called(self):
+ """
+ Test that the _srv_accept method of b10-init is called when the
+ listening socket is readable.
+ """
+ self.__b10_init.runnable = True
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._srv_accept = self.__accept
+ self.__b10_init.ccs = self.__CCS()
+ init.select.select = self.__select_accept
+ self.__b10_init.run(2)
+ # It called the accept
+ self.assertTrue(self.__accept_called)
+ # And the select had the right parameters
+ self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+ def test_srv_accept(self):
+ """
+ Test how the _srv_accept method works.
+ """
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._srv_accept()
+ # After we accepted, a new socket is added there
+ socket = self.__b10_init._unix_sockets[13][0]
+ # The socket is properly stored there
+ self.assertTrue(isinstance(socket, self.__FalseSocket))
+ # And the buffer (yet empty) is there
+ self.assertEqual({13: (socket, b'')}, self.__b10_init._unix_sockets)
+
+ def __socket_data(self, socket):
+ self.__b10_init.runnable = False
+ self.__socket_data_called = socket
+
+ def test_socket_data(self):
+ """
+ Test that a socket that wants attention gets it.
+ """
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._socket_data = self.__socket_data
+ self.__b10_init.ccs = self.__CCS()
+ self.__b10_init._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+ self.__b10_init.runnable = True
+ init.select.select = self.__select_data
+ self.__b10_init.run(2)
+ self.assertEqual(13, self.__socket_data_called)
+ self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+ def __prepare_data(self, data):
+ socket = self.__FalseSocket(self, 13)
+ self.__b10_init._unix_sockets = {13: (socket, b'')}
+ socket.data = data
+ self.__b10_init.socket_consumer_dead = self.__consumer_dead
+ self.__b10_init.socket_request_handler = self.__socket_request_handler
+ return socket
+
+ def __consumer_dead(self, socket):
+ self.__consumer_dead_called = socket
+
+ def __socket_request_handler(self, token, socket):
+ self.__socket_request_handler_called = (token, socket)
+
+ def test_socket_closed(self):
+ """
+ Test that a socket is removed and the socket_consumer_dead is called
+ when it is closed.
+ """
+ socket = self.__prepare_data(None)
+ self.__b10_init._socket_data(13)
+ self.assertEqual(socket, self.__consumer_dead_called)
+ self.assertEqual({}, self.__b10_init._unix_sockets)
+ self.assertTrue(socket.closed)
+
+ def test_socket_short(self):
+ """
+ Test that if there's not enough data to get the whole socket, it is
+ kept there, but nothing is called.
+ """
+ socket = self.__prepare_data(b'tok')
+ self.__b10_init._socket_data(13)
+ self.assertEqual({13: (socket, b'tok')}, self.__b10_init._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertIsNone(self.__socket_request_handler_called)
+
+ def test_socket_continue(self):
+ """
+ Test that we call the token handling function when the whole token
+ comes. This test pretends to continue reading where the previous one
+ stopped.
+ """
+ socket = self.__prepare_data(b"en\nanothe")
+ # The data to finish
+ self.__b10_init._unix_sockets[13] = (socket, b'tok')
+ self.__b10_init._socket_data(13)
+ self.assertEqual({13: (socket, b'anothe')}, self.__b10_init._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertEqual((b'token', socket),
+ self.__socket_request_handler_called)
+
+ def test_broken_socket(self):
+ """
+ If the socket raises an exception during the read other than EAGAIN,
+ it is broken and we remove it.
+ """
+ sock = self.__prepare_data(socket.error(errno.ENOMEM,
+ "There's more memory available, but not for you"))
+ self.__b10_init._socket_data(13)
+ self.assertEqual(sock, self.__consumer_dead_called)
+ self.assertEqual({}, self.__b10_init._unix_sockets)
+ self.assertTrue(sock.closed)
+
+class TestFunctions(unittest.TestCase):
+ def setUp(self):
+ self.lockfile_testpath = \
+ "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test"
+ self.assertFalse(os.path.exists(self.lockfile_testpath))
+ os.mkdir(self.lockfile_testpath)
+ self.assertTrue(os.path.isdir(self.lockfile_testpath))
+ self.__isfile_orig = init.os.path.isfile
+ self.__unlink_orig = init.os.unlink
+
+ def tearDown(self):
+ os.rmdir(self.lockfile_testpath)
+ self.assertFalse(os.path.isdir(self.lockfile_testpath))
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@"
+ init.os.path.isfile = self.__isfile_orig
+ init.os.unlink = self.__unlink_orig
+
+ def test_remove_lock_files(self):
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath
+
+ # create lockfiles for the testcase
+ lockfiles = ["logger_lockfile"]
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.exists(fname))
+ open(fname, "w").close()
+ self.assertTrue(os.path.isfile(fname))
+
+ # first call should clear up all the lockfiles
+ init.remove_lock_files()
+
+ # check if the lockfiles exist
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.isfile(fname))
+
+ # second call should not assert anyway
+ init.remove_lock_files()
+
+ def test_remove_lock_files_fail(self):
+ # Permission error on unlink is ignored; other exceptions are really
+ # unexpected and propagated.
+ def __raising_unlink(unused, ex):
+ raise ex
+
+ init.os.path.isfile = lambda _: True
+ os_error = OSError()
+ init.os.unlink = lambda f: __raising_unlink(f, os_error)
+
+ os_error.errno = errno.EPERM
+ init.remove_lock_files() # no disruption
+
+ os_error.errno = errno.EACCES
+ init.remove_lock_files() # no disruption
+
+ os_error.errno = errno.ENOENT
+ self.assertRaises(OSError, init.remove_lock_files)
+
+ init.os.unlink = lambda f: __raising_unlink(f, Exception('bad'))
+ self.assertRaises(Exception, init.remove_lock_files)
+
+ def test_get_signame(self):
+ # just test with some samples
+ signame = init.get_signame(signal.SIGTERM)
+ self.assertEqual('SIGTERM', signame)
+ signame = init.get_signame(signal.SIGKILL)
+ self.assertEqual('SIGKILL', signame)
+ # 59426 is hopefully an unused signal on most platforms
+ signame = init.get_signame(59426)
+ self.assertEqual('Unknown signal 59426', signame)
+
+ def test_fatal_signal(self):
+ self.assertIsNone(init.b10_init)
+ init.b10_init = Init()
+ init.b10_init.runnable = True
+ init.fatal_signal(signal.SIGTERM, None)
+ # Now, runnable must be False
+ self.assertFalse(init.b10_init.runnable)
+ init.b10_init = None
+
+if __name__ == '__main__':
+ # store os.environ for test_unchanged_environment
+ original_os_environ = copy.deepcopy(os.environ)
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index af599a4..f382e2a 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -25,7 +25,7 @@ from bindctl.moduleinfo import *
from bindctl.cmdparse import BindCmdParser
from bindctl import command_sets
from xml.dom import minidom
-import isc
+import isc.config
import isc.cc.data
import http.client
import json
diff --git a/src/bin/bindctl/bindctl.xml b/src/bin/bindctl/bindctl.xml
index 3993739..b5215f4 100644
--- a/src/bin/bindctl/bindctl.xml
+++ b/src/bin/bindctl/bindctl.xml
@@ -218,7 +218,7 @@
<command>config</command> for Configuration commands.
<!-- TODO: or is config from the cfgmgr module? -->
Additional modules may be available, such as
- <command>Boss</command>, <command>Xfrin</command>, and
+ <command>Init</command>, <command>Xfrin</command>, and
<command>Auth</command>.
</para>
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 546ecc0..875b06e 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -34,7 +34,7 @@ isc.util.process.rename()
# number, and the overall BIND 10 version number (set in configure.ac).
VERSION = "bindctl 20110217 (BIND 10 @PACKAGE_VERSION@)"
-DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Boss/start_auth', 'Recurse/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
+DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Init/start_auth', 'Auth/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
def prepare_config_commands(tool):
'''Prepare fixed commands for local configuration editing'''
diff --git a/src/bin/bindctl/command_sets.py b/src/bin/bindctl/command_sets.py
index c001ec8..b146c38 100644
--- a/src/bin/bindctl/command_sets.py
+++ b/src/bin/bindctl/command_sets.py
@@ -35,21 +35,21 @@ command_sets = {
'commands':
[
'!echo adding Authoritative server component',
- 'config add /Boss/components b10-auth',
- 'config set /Boss/components/b10-auth/kind needed',
- 'config set /Boss/components/b10-auth/special auth',
+ 'config add /Init/components b10-auth',
+ 'config set /Init/components/b10-auth/kind needed',
+ 'config set /Init/components/b10-auth/special auth',
'!echo adding Xfrin component',
- 'config add /Boss/components b10-xfrin',
- 'config set /Boss/components/b10-xfrin/address Xfrin',
- 'config set /Boss/components/b10-xfrin/kind dispensable',
+ 'config add /Init/components b10-xfrin',
+ 'config set /Init/components/b10-xfrin/address Xfrin',
+ 'config set /Init/components/b10-xfrin/kind dispensable',
'!echo adding Xfrout component',
- 'config add /Boss/components b10-xfrout',
- 'config set /Boss/components/b10-xfrout/address Xfrout',
- 'config set /Boss/components/b10-xfrout/kind dispensable',
+ 'config add /Init/components b10-xfrout',
+ 'config set /Init/components/b10-xfrout/address Xfrout',
+ 'config set /Init/components/b10-xfrout/kind dispensable',
'!echo adding Zone Manager component',
- 'config add /Boss/components b10-zonemgr',
- 'config set /Boss/components/b10-zonemgr/address Zonemgr',
- 'config set /Boss/components/b10-zonemgr/kind dispensable',
+ 'config add /Init/components b10-zonemgr',
+ 'config set /Init/components/b10-zonemgr/address Zonemgr',
+ 'config set /Init/components/b10-zonemgr/kind dispensable',
'!echo Components added. Please enter "config commit" to',
'!echo finalize initial setup and run the components.'
]
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index 999d7ee..8a5d00b 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -23,7 +23,7 @@ BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
# Note: lib/dns/python/.libs is necessary because __init__.py of isc package
# automatically imports isc.datasrc, which then requires the DNS loadable
# module. #2145 should eliminate the need for it.
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 315e3c5..06b9b0f 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -115,7 +115,7 @@ def main():
cm.read_config()
for ppath in PLUGIN_PATHS:
load_plugins(ppath, cm)
- cm.notify_boss()
+ cm.notify_b10_init()
cm.run()
except SessionError as se:
logger.fatal(CFGMGR_CC_SESSION_ERROR, se)
diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
index 351e8bf..02b48bd 100644
--- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
+++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
@@ -27,7 +27,7 @@ class MyConfigManager:
def __init__(self, path, filename, session=None, rename_config_file=False):
self._path = path
self.read_config_called = False
- self.notify_boss_called = False
+ self.notify_b10_init_called = False
self.run_called = False
self.write_config_called = False
self.rename_config_called = False
@@ -37,8 +37,8 @@ class MyConfigManager:
def read_config(self):
self.read_config_called = True
- def notify_boss(self):
- self.notify_boss_called = True
+ def notify_b10_init(self):
+ self.notify_b10_init_called = True
def run(self):
self.run_called = True
@@ -89,7 +89,7 @@ class TestConfigManagerStartup(unittest.TestCase):
b.load_plugins = orig_load
self.assertTrue(b.cm.read_config_called)
- self.assertTrue(b.cm.notify_boss_called)
+ self.assertTrue(b.cm.notify_b10_init_called)
self.assertTrue(b.cm.run_called)
self.assertTrue(self.loaded_plugins)
# if there are no changes, config is not written
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index bfc13af..ab87dd3 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -11,17 +11,12 @@ pylogmessagedir = $(pyexecdir)/isc/log_messages/
b10_cmdctldir = $(pkgdatadir)
-# NOTE: this will overwrite on install
-# So these generic copies are placed in share/bind10 instead of to etc
-# Admin or packageer will need to put into place manually.
+USERSFILES = cmdctl-accounts.csv
+CERTFILES = cmdctl-keyfile.pem cmdctl-certfile.pem
-CMDCTL_CONFIGURATIONS = cmdctl-accounts.csv
-CMDCTL_CONFIGURATIONS += cmdctl-keyfile.pem cmdctl-certfile.pem
+b10_cmdctl_DATA = cmdctl.spec
-b10_cmdctl_DATA = $(CMDCTL_CONFIGURATIONS)
-b10_cmdctl_DATA += cmdctl.spec
-
-EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
+EXTRA_DIST = $(USERSFILES)
CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
@@ -55,7 +50,7 @@ $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py $(CERTFILES)
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
@@ -76,7 +71,7 @@ if INSTALL_CONFIGURATIONS
# because these file will contain sensitive information.
install-data-local:
$(mkinstalldirs) $(DESTDIR)/@sysconfdir@/@PACKAGE@
- for f in $(CMDCTL_CONFIGURATIONS) ; do \
+ for f in $(USERSFILES) $(CERTFILES) ; do \
if test ! -f $(DESTDIR)$(sysconfdir)/@PACKAGE@/$$f; then \
${INSTALL} -m 640 $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
fi ; \
diff --git a/src/bin/cmdctl/b10-cmdctl.xml b/src/bin/cmdctl/b10-cmdctl.xml
index 4b1b32f..4d315ac 100644
--- a/src/bin/cmdctl/b10-cmdctl.xml
+++ b/src/bin/cmdctl/b10-cmdctl.xml
@@ -78,7 +78,7 @@
<refsect1>
<title>OPTIONS</title>
-
+
<para>The arguments are as follows:</para>
<variablelist>
@@ -175,7 +175,7 @@
<command>shutdown</command> exits <command>b10-cmdctl</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index b026612..15a41ec 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -339,7 +339,7 @@ class CommandControl():
self.modules_spec[args[0]] = args[1]
elif command == ccsession.COMMAND_SHUTDOWN:
- #When cmdctl get 'shutdown' command from boss,
+ #When cmdctl gets the 'shutdown' command from b10-init,
#shutdown the outer httpserver.
self._module_cc.send_stopping()
self._httpserver.shutdown()
@@ -650,4 +650,6 @@ if __name__ == '__main__':
if httpd:
httpd.shutdown()
+ logger.info(CMDCTL_EXITING)
+
sys.exit(result)
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index 5fbb430..32afce3 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -43,6 +43,9 @@ specific error is printed in the message.
This debug message indicates that the given command has been sent to
the given module.
+% CMDCTL_EXITING exiting
+The b10-cmdctl daemon is exiting.
+
% CMDCTL_NO_SUCH_USER username not found in user database: %1
A login attempt was made to b10-cmdctl, but the username was not known.
Users can be added with the tool b10-cmdctl-usermgr.
diff --git a/src/bin/ddns/b10-ddns.xml b/src/bin/ddns/b10-ddns.xml
index fb895b9..7935482 100644
--- a/src/bin/ddns/b10-ddns.xml
+++ b/src/bin/ddns/b10-ddns.xml
@@ -56,8 +56,8 @@
<para>The <command>b10-ddns</command> daemon provides the BIND 10
Dynamic Update (DDNS) service, as specified in RFC 2136.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -119,7 +119,7 @@
<listitem>
<para>
This value is ignored at this moment, but is provided for
- compatibility with the <command>bind10</command> Boss process.
+ compatibility with the <command>b10-init</command> process.
</para>
</listitem>
</varlistentry>
@@ -154,7 +154,7 @@
<command>shutdown</command> exits <command>b10-ddns</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/ddns/ddns.py.in b/src/bin/ddns/ddns.py.in
index 094e0ec..d7fcab7 100755
--- a/src/bin/ddns/ddns.py.in
+++ b/src/bin/ddns/ddns.py.in
@@ -134,7 +134,7 @@ def get_datasrc_client(cc_session):
function will simply be removed.
'''
- HARDCODED_DATASRC_CLASS = RRClass.IN()
+ HARDCODED_DATASRC_CLASS = RRClass.IN
file, is_default = cc_session.get_remote_config_value("Auth",
"database_file")
# See xfrout.py:get_db_file() for this trick:
@@ -469,7 +469,7 @@ class DDNSServer:
self.__request_msg.clear(Message.PARSE)
# specify PRESERVE_ORDER as we need to handle each RR separately.
self.__request_msg.from_wire(req_data, Message.PRESERVE_ORDER)
- if self.__request_msg.get_opcode() != Opcode.UPDATE():
+ if self.__request_msg.get_opcode() != Opcode.UPDATE:
raise self.InternalError('Update request has unexpected '
'opcode: ' +
str(self.__request_msg.get_opcode()))
@@ -536,7 +536,7 @@ class DDNSServer:
else:
tcp_ctx.close()
except socket.error as ex:
- logger.warn(DDNS_RESPONSE_SOCKET_ERROR, ClientFormatter(dest), ex)
+ logger.warn(DDNS_RESPONSE_SOCKET_SEND_FAILED, ClientFormatter(dest), ex)
return False
return True
@@ -683,7 +683,7 @@ class DDNSServer:
result = ctx[0].send_ready()
if result != DNSTCPContext.SENDING:
if result == DNSTCPContext.CLOSED:
- logger.warn(DDNS_RESPONSE_TCP_SOCKET_ERROR,
+ logger.warn(DDNS_RESPONSE_TCP_SOCKET_SEND_FAILED,
ClientFormatter(ctx[1]))
ctx[0].close()
del self._tcp_ctxs[fileno]
diff --git a/src/bin/ddns/ddns_messages.mes b/src/bin/ddns/ddns_messages.mes
index d128361..cdc7b4d 100644
--- a/src/bin/ddns/ddns_messages.mes
+++ b/src/bin/ddns/ddns_messages.mes
@@ -134,12 +134,12 @@ appropriate ACL configuration or some lower layer filtering. The
number of existing TCP clients are shown in the log, which should be
identical to the current quota.
-% DDNS_RESPONSE_SOCKET_ERROR failed to send update response to %1: %2
+% DDNS_RESPONSE_SOCKET_SEND_FAILED failed to send update response to %1: %2
Network I/O error happens in sending an update response. The
client's address that caused the error and error details are also
logged.
-% DDNS_RESPONSE_TCP_SOCKET_ERROR failed to complete sending update response to %1 over TCP
+% DDNS_RESPONSE_TCP_SOCKET_SEND_FAILED failed to complete sending update response to %1 over TCP
b10-ddns had tried to send an update response over TCP, and it hadn't
been completed at that time, and a followup attempt to complete the
send operation failed due to some network I/O error. While a network
diff --git a/src/bin/ddns/tests/ddns_test.py b/src/bin/ddns/tests/ddns_test.py
index 0f5ca9b..d366f09 100755
--- a/src/bin/ddns/tests/ddns_test.py
+++ b/src/bin/ddns/tests/ddns_test.py
@@ -39,9 +39,9 @@ TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied
TEST_ZONE_NAME = Name('example.org')
TEST_ZONE_NAME_STR = TEST_ZONE_NAME.to_text()
-UPDATE_RRTYPE = RRType.SOA()
+UPDATE_RRTYPE = RRType.SOA
TEST_QID = 5353 # arbitrary chosen
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_RRCLASS_STR = TEST_RRCLASS.to_text()
TEST_SERVER6 = ('2001:db8::53', 53, 0, 0)
TEST_CLIENT6 = ('2001:db8::1', 53000, 0, 0)
@@ -169,9 +169,9 @@ class FakeUpdateSession:
self.__msg.make_response()
self.__msg.clear_section(SECTION_ZONE)
if self.__faked_result == UPDATE_SUCCESS:
- self.__msg.set_rcode(Rcode.NOERROR())
+ self.__msg.set_rcode(Rcode.NOERROR)
else:
- self.__msg.set_rcode(Rcode.REFUSED())
+ self.__msg.set_rcode(Rcode.REFUSED)
return self.__msg
class FakeKeyringModule:
@@ -478,7 +478,7 @@ class TestDDNSServer(unittest.TestCase):
# By default (in our faked config) it should be derived from the
# test data source
rrclass, datasrc_client = self.ddns_server._datasrc_info
- self.assertEqual(RRClass.IN(), rrclass)
+ self.assertEqual(RRClass.IN, rrclass)
self.assertEqual(DataSourceClient.SUCCESS,
datasrc_client.find_zone(Name('example.org'))[0])
@@ -491,7 +491,7 @@ class TestDDNSServer(unittest.TestCase):
{'database_file': './notexistentdir/somedb.sqlite3'}
self.__cc_session.add_remote_config_by_name('Auth')
rrclass, datasrc_client = self.ddns_server._datasrc_info
- self.assertEqual(RRClass.IN(), rrclass)
+ self.assertEqual(RRClass.IN, rrclass)
self.assertRaises(isc.datasrc.Error,
datasrc_client.find_zone, Name('example.org'))
@@ -887,12 +887,12 @@ class TestDDNSServer(unittest.TestCase):
self.__select_answer = ([], [10], [])
self.assertRaises(KeyError, self.ddns_server.run)
-def create_msg(opcode=Opcode.UPDATE(), zones=[TEST_ZONE_RECORD], prereq=[],
+def create_msg(opcode=Opcode.UPDATE, zones=[TEST_ZONE_RECORD], prereq=[],
tsigctx=None):
msg = Message(Message.RENDER)
msg.set_qid(TEST_QID)
msg.set_opcode(opcode)
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_rcode(Rcode.NOERROR)
for z in zones:
msg.add_question(z)
for p in prereq:
@@ -936,7 +936,7 @@ class TestDDNSSession(unittest.TestCase):
return FakeUpdateSession(req_message, client_addr, zone_config,
self.__faked_result)
- def check_update_response(self, resp_wire, expected_rcode=Rcode.NOERROR(),
+ def check_update_response(self, resp_wire, expected_rcode=Rcode.NOERROR,
tsig_ctx=None, tcp=False):
'''Check if given wire data are valid form of update response.
@@ -963,7 +963,7 @@ class TestDDNSSession(unittest.TestCase):
self.assertNotEqual(None, tsig_record)
self.assertEqual(TSIGError.NOERROR,
tsig_ctx.verify(tsig_record, resp_wire))
- self.assertEqual(Opcode.UPDATE(), msg.get_opcode())
+ self.assertEqual(Opcode.UPDATE, msg.get_opcode())
self.assertEqual(expected_rcode, msg.get_rcode())
self.assertEqual(TEST_QID, msg.get_qid())
for section in [SECTION_ZONE, SECTION_PREREQUISITE, SECTION_UPDATE]:
@@ -977,7 +977,7 @@ class TestDDNSSession(unittest.TestCase):
server_addr = TEST_SERVER6 if ipv6 else TEST_SERVER4
client_addr = TEST_CLIENT6 if ipv6 else TEST_CLIENT4
tsig = TSIGContext(tsig_key) if tsig_key is not None else None
- rcode = Rcode.NOERROR() if result == UPDATE_SUCCESS else Rcode.REFUSED()
+ rcode = Rcode.NOERROR if result == UPDATE_SUCCESS else Rcode.REFUSED
has_response = (result != UPDATE_DROP)
self.assertEqual(has_response,
@@ -1015,7 +1015,7 @@ class TestDDNSSession(unittest.TestCase):
# Opcode is not UPDATE
self.assertFalse(self.server.handle_request(
- (self.__sock, None, None, create_msg(opcode=Opcode.QUERY()))))
+ (self.__sock, None, None, create_msg(opcode=Opcode.QUERY))))
self.assertEqual((None, None), (s._sent_data, s._sent_addr))
# TSIG verification error. We use UPDATE_DROP to signal check_session
@@ -1031,7 +1031,7 @@ class TestDDNSSession(unittest.TestCase):
TEST_CLIENT6,
create_msg())))
# this check ensures sendto() was really attempted.
- self.check_update_response(self.__sock._sent_data, Rcode.NOERROR())
+ self.check_update_response(self.__sock._sent_data, Rcode.NOERROR)
def test_tcp_request(self):
# A simple case using TCP: all response data are sent out at once.
@@ -1040,7 +1040,7 @@ class TestDDNSSession(unittest.TestCase):
self.assertTrue(self.server.handle_request((s, TEST_SERVER6,
TEST_CLIENT6,
create_msg())))
- self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+ self.check_update_response(s._sent_data, Rcode.NOERROR, tcp=True)
# In the current implementation, the socket should be closed
# immediately after a successful send.
self.assertEqual(1, s._close_called)
@@ -1071,7 +1071,7 @@ class TestDDNSSession(unittest.TestCase):
s.make_send_ready()
self.assertEqual(DNSTCPContext.SEND_DONE,
self.server._tcp_ctxs[s.fileno()][0].send_ready())
- self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+ self.check_update_response(s._sent_data, Rcode.NOERROR, tcp=True)
def test_tcp_request_error(self):
# initial send() on the TCP socket will fail. The request handling
@@ -1127,9 +1127,9 @@ class TestDDNSSession(unittest.TestCase):
self.__faked_result = UPDATE_DROP
# Put the same RR twice in the prerequisite section. We should see
# them as separate RRs.
- dummy_record = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ dummy_record = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS,
RRTTL(0))
- dummy_record.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, "ns.example"))
+ dummy_record.add_rdata(Rdata(RRType.NS, TEST_RRCLASS, "ns.example."))
self.server.handle_request((self.__sock, TEST_SERVER6, TEST_CLIENT6,
create_msg(prereq=[dummy_record,
dummy_record])))
diff --git a/src/bin/dhcp4/ctrl_dhcp4_srv.h b/src/bin/dhcp4/ctrl_dhcp4_srv.h
index ab4d45a..ac15c44 100644
--- a/src/bin/dhcp4/ctrl_dhcp4_srv.h
+++ b/src/bin/dhcp4/ctrl_dhcp4_srv.h
@@ -49,7 +49,7 @@ public:
/// @brief Establishes msgq session.
///
/// Creates session that will be used to receive commands and updated
- /// configuration from boss (or indirectly from user via bindctl).
+ /// configuration from cfgmgr (or indirectly from user via bindctl).
void establishSession();
/// @brief Terminates existing msgq session.
diff --git a/src/bin/dhcp4/tests/dhcp4_test.py b/src/bin/dhcp4/tests/dhcp4_test.py
index e493e04..276456e 100644
--- a/src/bin/dhcp4/tests/dhcp4_test.py
+++ b/src/bin/dhcp4/tests/dhcp4_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from init import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
diff --git a/src/bin/dhcp6/ctrl_dhcp6_srv.h b/src/bin/dhcp6/ctrl_dhcp6_srv.h
index e3452bb..0e699ce 100644
--- a/src/bin/dhcp6/ctrl_dhcp6_srv.h
+++ b/src/bin/dhcp6/ctrl_dhcp6_srv.h
@@ -49,7 +49,7 @@ public:
/// @brief Establishes msgq session.
///
/// Creates session that will be used to receive commands and updated
- /// configuration from boss (or indirectly from user via bindctl).
+ /// configuration from cfgmgr (or indirectly from user via bindctl).
void establishSession();
/// @brief Terminates existing msgq session.
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 1870392..3333111 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from init import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
@@ -157,7 +157,7 @@ class TestDhcpv6Daemon(unittest.TestCase):
def test_alive(self):
"""
- Simple test. Checks that b10-dhcp6 can be started and prints out info
+ Simple test. Checks that b10-dhcp6 can be started and prints out info
about starting DHCPv6 operation.
"""
print("Note: Purpose of some of the tests is to check if DHCPv6 server can be started,")
diff --git a/src/bin/loadzone/loadzone.py.in b/src/bin/loadzone/loadzone.py.in
index 949446b..736aa31 100755
--- a/src/bin/loadzone/loadzone.py.in
+++ b/src/bin/loadzone/loadzone.py.in
@@ -164,7 +164,7 @@ class LoadZoneRunner:
self._zone_class = RRClass(options.zone_class)
except isc.dns.InvalidRRClass as ex:
raise BadArgument('Invalid zone class: ' + str(ex))
- if self._zone_class != RRClass.IN():
+ if self._zone_class != RRClass.IN:
raise BadArgument("RR class is not supported: " +
str(self._zone_class))
diff --git a/src/bin/loadzone/tests/correct/example.db b/src/bin/loadzone/tests/correct/example.db
index 38d1329..fe012cf 100644
--- a/src/bin/loadzone/tests/correct/example.db
+++ b/src/bin/loadzone/tests/correct/example.db
@@ -2,17 +2,11 @@
$ORIGIN example.com.
$TTL 60
@ IN SOA ns1.example.com. hostmaster.example.com. (1 43200 900 1814400 7200)
-; these need #2390
-; IN 20 NS ns1
-; NS ns2
- IN 20 NS ns1.example.com.
- NS ns2.example.com.
+ IN 20 NS ns1
+ NS ns2
ns1 IN 30 A 192.168.1.102
-; these need #2390
-; 70 NS ns3
-; IN NS ns4
- 70 NS ns3.example.com.
- IN NS ns4.example.com.
+ 70 NS ns3
+ IN NS ns4
10 IN MX 10 mail.example.com.
ns2 80 A 1.1.1.1
ns3 IN A 2.2.2.2
diff --git a/src/bin/loadzone/tests/correct/include.db b/src/bin/loadzone/tests/correct/include.db
index 53871bb..f60a240 100644
--- a/src/bin/loadzone/tests/correct/include.db
+++ b/src/bin/loadzone/tests/correct/include.db
@@ -7,9 +7,7 @@ $TTL 300
1814400
3600
)
-; this needs #2390
-; NS ns
- NS ns.include.
+ NS ns
ns A 127.0.0.1
diff --git a/src/bin/loadzone/tests/correct/mix1.db b/src/bin/loadzone/tests/correct/mix1.db
index 059fde7..a9d58a8 100644
--- a/src/bin/loadzone/tests/correct/mix1.db
+++ b/src/bin/loadzone/tests/correct/mix1.db
@@ -6,9 +6,7 @@ $ORIGIN mix1.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.mix1.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/mix2.db b/src/bin/loadzone/tests/correct/mix2.db
index e89c2af..2c8153d 100644
--- a/src/bin/loadzone/tests/correct/mix2.db
+++ b/src/bin/loadzone/tests/correct/mix2.db
@@ -6,9 +6,7 @@ $ORIGIN mix2.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.mix2.
+ NS ns
ns A 10.53.0.1
a TXT "inherited ttl 1"
$INCLUDE mix2sub1.txt
diff --git a/src/bin/loadzone/tests/correct/ttl1.db b/src/bin/loadzone/tests/correct/ttl1.db
index 7f04ff8..aa6e2bb 100644
--- a/src/bin/loadzone/tests/correct/ttl1.db
+++ b/src/bin/loadzone/tests/correct/ttl1.db
@@ -6,9 +6,7 @@ $ORIGIN ttl1.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttl1.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/ttl2.db b/src/bin/loadzone/tests/correct/ttl2.db
index b7df040..f7f6eee 100644
--- a/src/bin/loadzone/tests/correct/ttl2.db
+++ b/src/bin/loadzone/tests/correct/ttl2.db
@@ -6,9 +6,7 @@ $ORIGIN ttl2.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttl2.
+ NS ns
ns A 10.53.0.1
a TXT "inherited ttl 1"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/ttlext.db b/src/bin/loadzone/tests/correct/ttlext.db
index 844f452..f8b96ea 100644
--- a/src/bin/loadzone/tests/correct/ttlext.db
+++ b/src/bin/loadzone/tests/correct/ttlext.db
@@ -6,9 +6,7 @@ $ORIGIN ttlext.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttlext.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2S TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/loadzone_test.py b/src/bin/loadzone/tests/loadzone_test.py
index fee8c2a..351bc59 100755
--- a/src/bin/loadzone/tests/loadzone_test.py
+++ b/src/bin/loadzone/tests/loadzone_test.py
@@ -79,7 +79,7 @@ class TestLoadZoneRunner(unittest.TestCase):
self.assertEqual(DATASRC_CONFIG, self.__runner._datasrc_config)
self.assertEqual('sqlite3', self.__runner._datasrc_type) # default
self.assertEqual(10000, self.__runner._report_interval) # default
- self.assertEqual(RRClass.IN(), self.__runner._zone_class) # default
+ self.assertEqual(RRClass.IN, self.__runner._zone_class) # default
self.assertEqual('INFO', self.__runner._log_severity) # default
self.assertEqual(0, self.__runner._log_debuglevel)
@@ -135,7 +135,7 @@ class TestLoadZoneRunner(unittest.TestCase):
'memory')
def __common_load_setup(self):
- self.__runner._zone_class = RRClass.IN()
+ self.__runner._zone_class = RRClass.IN
self.__runner._zone_name = TEST_ZONE_NAME
self.__runner._zone_file = NEW_ZONE_TXT_FILE
self.__runner._datasrc_type = 'sqlite3'
@@ -159,7 +159,7 @@ class TestLoadZoneRunner(unittest.TestCase):
self.assertEqual(client.NOTFOUND, result)
return
self.assertEqual(client.SUCCESS, result)
- result, rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, rrset, _ = finder.find(zone_name, RRType.SOA)
if soa_txt:
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(soa_txt, rrset.to_text())
diff --git a/src/bin/msgq/msgq.py.in b/src/bin/msgq/msgq.py.in
index 68c18dc..ca5d705 100755
--- a/src/bin/msgq/msgq.py.in
+++ b/src/bin/msgq/msgq.py.in
@@ -70,6 +70,23 @@ SPECFILE_LOCATION = SPECFILE_PATH + "/msgq.spec"
class MsgQReceiveError(Exception): pass
+class MsgQCloseOnReceive(Exception):
+ """Exception raised when reading data from a socket results in 'shutdown'.
+
+ This happens when msgq received 0-length data. This class holds whether
+ it happens in the middle of reading (i.e. after reading some) via
+ partial_read parameter, which is set to True if and only if so.
+ This will be used by an upper layer catching the exception to distinguish
+ the severity of the event.
+
+ """
+ def __init__(self, reason, partial_read):
+ self.partial_read = partial_read
+ self.__reason = reason
+
+ def __str__(self):
+ return self.__reason
+
class SubscriptionManager:
def __init__(self, cfgmgr_ready):
"""
@@ -311,23 +328,22 @@ class MsgQ:
lname = self.newlname()
self.lnames[lname] = newsocket
+ logger.debug(TRACE_BASIC, MSGQ_SOCKET_REGISTERED, newsocket.fileno(),
+ lname)
+
if self.poller:
self.poller.register(newsocket, select.POLLIN)
else:
self.add_kqueue_socket(newsocket)
- def process_socket(self, fd):
- """Process a read on a socket."""
- if not fd in self.sockets:
- logger.error(MSGQ_READ_UNKNOWN_FD, fd)
- return
- sock = self.sockets[fd]
- self.process_packet(fd, sock)
-
def kill_socket(self, fd, sock):
"""Fully close down the socket."""
+ # Unregister events on the socket. Note that we don't have to do
+ # this for kqueue because the registered events are automatically
+ # deleted when the corresponding socket is closed.
if self.poller:
self.poller.unregister(sock)
+
self.subs.unsubscribe_all(sock)
lname = [ k for k, v in self.lnames.items() if v == sock ][0]
del self.lnames[lname]
@@ -337,24 +353,35 @@ class MsgQ:
del self.sendbuffs[fd]
logger.debug(TRACE_BASIC, MSGQ_SOCK_CLOSE, fd)
- def getbytes(self, fd, sock, length):
+ def __getbytes(self, fd, sock, length, continued):
"""Get exactly the requested bytes, or raise an exception if
- EOF."""
+ EOF.
+
+ continued is set to True if this method is called to complete
+ already read data.
+ """
received = b''
while len(received) < length:
try:
data = sock.recv(length - len(received))
- except socket.error:
- raise MsgQReceiveError(socket.error)
+
+ except socket.error as err:
+ # This case includes ECONNRESET, which seems to happen when
+ # the remote client has closed its socket at some subtle
+ # timing (it should normally result in receiving empty data).
+ # Since we didn't figure out how exactly that could happen,
+ # we treat it just like other really-unexpected socket errors.
+ raise MsgQReceiveError(str(err))
if len(data) == 0:
- raise MsgQReceiveError("EOF")
+ raise MsgQCloseOnReceive("EOF", continued)
received += data
+ continued = True
return received
def read_packet(self, fd, sock):
"""Read a correctly formatted packet. Will raise exceptions if
something fails."""
- lengths = self.getbytes(fd, sock, 6)
+ lengths = self.__getbytes(fd, sock, 6, False)
overall_length, routing_length = struct.unpack(">IH", lengths)
if overall_length < 2:
raise MsgQReceiveError("overall_length < 2")
@@ -365,9 +392,9 @@ class MsgQ:
raise MsgQReceiveError("routing_length == 0")
data_length = overall_length - routing_length
# probably need to sanity check lengths here...
- routing = self.getbytes(fd, sock, routing_length)
+ routing = self.__getbytes(fd, sock, routing_length, True)
if data_length > 0:
- data = self.getbytes(fd, sock, data_length)
+ data = self.__getbytes(fd, sock, data_length, True)
else:
data = None
return (routing, data)
@@ -376,8 +403,15 @@ class MsgQ:
"""Process one packet."""
try:
routing, data = self.read_packet(fd, sock)
- except MsgQReceiveError as err:
- logger.error(MSGQ_RECV_ERR, fd, err)
+ except (MsgQReceiveError, MsgQCloseOnReceive) as err:
+ # If it's MsgQCloseOnReceive and that happens without reading
+ # any data, it basically means the remote clinet has closed the
+ # socket, so we log it as debug information. Otherwise, it's
+ # a somewhat unexpected event, so we consider it an "error".
+ if isinstance(err, MsgQCloseOnReceive) and not err.partial_read:
+ logger.debug(TRACE_BASIC, MSGQ_CLOSE_ON_RECV, fd)
+ else:
+ logger.error(MSGQ_RECV_ERROR, fd, err)
self.kill_socket(fd, sock)
return
@@ -385,7 +419,7 @@ class MsgQ:
routingmsg = isc.cc.message.from_wire(routing)
except DecodeError as err:
self.kill_socket(fd, sock)
- logger.error(MSGQ_HDR_DECODE_ERR, fd, err)
+ logger.error(MSGQ_HDR_DECODE_ERROR, fd, err)
return
self.process_command(fd, sock, routingmsg, data)
@@ -428,9 +462,12 @@ class MsgQ:
def sendmsg(self, sock, env, msg = None):
self.send_prepared_msg(sock, self.preparemsg(env, msg))
- def __send_data(self, sock, data):
+ def _send_data(self, sock, data):
"""
- Send a piece of data to the given socket.
+ Send a piece of data to the given socket. This method is
+ essentially "private" to MsgQ, but defined as if it were "protected"
+ for easier access from tests.
+
Parameters:
sock: The socket to send to
data: The list of bytes to send
@@ -446,15 +483,17 @@ class MsgQ:
sock.setblocking(0)
return sock.send(data)
except socket.error as e:
- if e.errno in [ errno.EAGAIN,
- errno.EWOULDBLOCK,
- errno.EINTR ]:
+ if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR ]:
return 0
- elif e.errno in [ errno.EPIPE,
- errno.ECONNRESET,
- errno.ENOBUFS ]:
- logger.error(MSGQ_SEND_ERR, sock.fileno(),
- errno.errorcode[e.errno])
+ elif e.errno in [ errno.EPIPE, errno.ECONNRESET, errno.ENOBUFS ]:
+ # EPIPE happens if the remote module has terminated by the time
+ # of this send; its severity can vary, but in many cases it
+ # shouldn't be critical, so we log it separately as a warning.
+ if e.errno == errno.EPIPE:
+ logger.warn(MSGQ_CLOSE_ON_SEND, sock.fileno())
+ else:
+ logger.error(MSGQ_SEND_ERROR, sock.fileno(),
+ errno.errorcode[e.errno])
self.kill_socket(sock.fileno(), sock)
return None
else:
@@ -469,7 +508,7 @@ class MsgQ:
if fileno in self.sendbuffs:
amount_sent = 0
else:
- amount_sent = self.__send_data(sock, msg)
+ amount_sent = self._send_data(sock, msg)
if amount_sent is None:
# Socket has been killed, drop the send
return
@@ -489,7 +528,7 @@ class MsgQ:
last_sent = now
if self.poller:
self.poller.register(fileno, select.POLLIN |
- select.POLLOUT)
+ select.POLLOUT)
else:
self.add_kqueue_socket(sock, True)
self.sendbuffs[fileno] = (last_sent, buff)
@@ -498,7 +537,7 @@ class MsgQ:
# Try to send some data from the buffer
(_, msg) = self.sendbuffs[fileno]
sock = self.sockets[fileno]
- amount_sent = self.__send_data(sock, msg)
+ amount_sent = self._send_data(sock, msg)
if amount_sent is not None:
# Keep the rest
msg = msg[amount_sent:]
@@ -581,7 +620,7 @@ class MsgQ:
if err.args[0] == errno.EINTR:
events = []
else:
- logger.fatal(MSGQ_POLL_ERR, err)
+ logger.fatal(MSGQ_POLL_ERROR, err)
break
with self.__lock:
for (fd, event) in events:
@@ -592,12 +631,17 @@ class MsgQ:
self.running = False
break
else:
- if event & select.POLLOUT:
- self.__process_write(fd)
- elif event & select.POLLIN:
- self.process_socket(fd)
- else:
+ writable = event & select.POLLOUT
+ # Note: it may be okay to read data if available
+ # immediately after write some, but due to unexpected
+ # regression (see comments on the kqueue version below)
+ # we restrict one operation per iteration for now.
+ # In future we may clarify the point and enable the
+ # "read/write" mode.
+ readable = not writable and (event & select.POLLIN)
+ if not writable and not readable:
logger.error(MSGQ_POLL_UNKNOWN_EVENT, fd, event)
+ self._process_fd(fd, writable, readable, False)
def run_kqueue(self):
while self.running:
@@ -616,14 +660,35 @@ class MsgQ:
self.running = False
break;
else:
- if event.filter == select.KQ_FILTER_WRITE:
- self.__process_write(event.ident)
- if event.filter == select.KQ_FILTER_READ and \
- event.data > 0:
- self.process_socket(event.ident)
- elif event.flags & select.KQ_EV_EOF:
- self.kill_socket(event.ident,
- self.sockets[event.ident])
+ fd = event.ident
+ writable = event.filter == select.KQ_FILTER_WRITE
+ readable = (event.filter == select.KQ_FILTER_READ and
+ event.data > 0)
+ # It seems to break some of our test cases if we
+ # immediately close the socket on EOF after reading
+ # some data. It may be possible to avoid by tweaking
+ # the test, but unless we can be sure we'll hold off.
+ closed = (not readable and
+ (event.flags & select.KQ_EV_EOF))
+ self._process_fd(fd, writable, readable, closed)
+
+ def _process_fd(self, fd, writable, readable, closed):
+ '''Process a single FD: unified subroutine of run_kqueue/poller.
+
+ closed can be True only in the case of kqueue. This is essentially
+ private but is defined as if it were "protected" so it's callable
+ from tests.
+
+ '''
+ # We need to check if FD is still in the sockets dict, because
+ # it's possible that the socket has been "killed" while processing
+ # other FDs; it's even possible it's killed within this method.
+ if writable and fd in self.sockets:
+ self.__process_write(fd)
+ if readable and fd in self.sockets:
+ self.process_packet(fd, self.sockets[fd])
+ if closed and fd in self.sockets:
+ self.kill_socket(fd, self.sockets[fd])
def stop(self):
# Signal it should terminate.
@@ -760,3 +825,5 @@ if __name__ == "__main__":
pass
msgq.shutdown()
+
+ logger.info(MSGQ_EXITING)
diff --git a/src/bin/msgq/msgq_messages.mes b/src/bin/msgq/msgq_messages.mes
index 75e4227..09c9030 100644
--- a/src/bin/msgq/msgq_messages.mes
+++ b/src/bin/msgq/msgq_messages.mes
@@ -23,6 +23,31 @@
This is a debug message. The message queue has little bit of special handling
for the configuration manager. This special handling is happening now.
+% MSGQ_CLOSE_ON_RECV Reading from socket canceled as it's closed: FD=%1
+A debug message. The msgq daemon was notified of a read event on a
+socket, but its initial read operation failed because the remote
+client has closed its socket. This is possible in a normal operation
+when a module shuts down.
+
+% MSGQ_CLOSE_ON_SEND Sending to socket failed as already closed (okay to ignore on shutdown): FD=%1
+The msgq daemon tries to send some data to a client module, but it
+failed because the socket has been closed. This normally means the
+client terminates (for some reason - either gracefully or as a crash)
+while other modules try to send a message to the terminated module.
+Since msgq doesn't keep track of the status of client modules, this
+can happen and is not really an error for msgq; however, it can still
+be an unexpected event for the BIND 10 system as a whole in that this
+particular message is lost, so it's logged as a warning. If this
+message is logged for a running BIND 10 system, it's suggested to
+check other log messages; there may be an error from other modules
+reporting a missing response message. One common, less critical case
+where this message is logged is during shutdown. The ordering of
+process shutdown is basically arbitrary at this moment, so it's
+possible that some module tries to send a "quitting" message to some
+other module but the latter has already shut down. Such cases are
+generally non critical, but you may want to check other possible error
+messages.
+
% MSGQ_COMMAND Running command %1 with arguments %2
Debug message. The message queue received a command and it is running it.
@@ -34,13 +59,21 @@ the message queue version and version of the module.
% MSGQ_CONFIG_DATA Received configuration update for the msgq: %1
Debug message. The message queue received a configuration update, handling it.
-% MSGQ_HDR_DECODE_ERR Error decoding header received from socket %1: %2
+% MSGQ_EXITING exiting
+The msgq daemon is exiting.
+
+% MSGQ_HDR_DECODE_ERROR Error decoding header received from socket %1: %2
The socket with mentioned file descriptor sent a packet. However, it was not
possible to decode the routing header of the packet. The packet is ignored.
This may be caused by a programmer error (one of the components sending invalid
data) or possibly by incompatible version of msgq and the component (but that's
unlikely, as the protocol is not changed often).
+% MSGQ_INVALID_CMD Received invalid command: %1
+An unknown command listed in the log has been received. It is ignored. This
+indicates either a programmer error (eg. a typo in the command name) or
+incompatible version of a module and message queue daemon.
+
% MSGQ_LISTENER_FAILED Failed to initialize listener on socket file '%1': %2
The message queue daemon tried to listen on a file socket (the path is in the
message), but it failed. The error from the operating system is logged.
@@ -52,7 +85,7 @@ Debug message. The listener is trying to open a listening socket.
Debug message. The message queue successfully opened a listening socket and
waits for incoming connections.
-% MSGQ_POLL_ERR Error while polling for events: %1
+% MSGQ_POLL_ERROR Error while polling for events: %1
A low-level error happened when waiting for events, the error is logged. The
reason for this varies, but it usually means the system is short on some
resources.
@@ -63,30 +96,41 @@ happen and it is either a programmer error or OS bug. The event is ignored. The
number noted as the event is the raw encoded value, which might be useful to
the authors when figuring the problem out.
-% MSGQ_READ_UNKNOWN_FD Got read on strange socket %1
-The OS reported a file descriptor is ready to read. But the daemon doesn't know
-the mentioned file descriptor, which is either a programmer error or OS bug.
-The read event is ignored.
-
-% MSGQ_RECV_ERR Error reading from socket %1: %2
+% MSGQ_RECV_ERROR Error reading from socket %1: %2
There was a low-level error when reading from a socket. The error is logged and
-the corresponding socket is dropped.
+the corresponding socket is dropped. The errors include receiving
+broken or (non empty but) incomplete data. In either case it usually suggests
+something unexpected has happened within the BIND 10 system; it's probably
+better to restart the system, and if it continues it should be
+reported as a bug. One known, probably non critical case is
+the "connection reset by peer" (or its variants) socket error appearing
+on shutdown. It's known this happens when the remote client closes the
+connection as part of shutdown process. Such cases are normally expected
+to be reported as receiving empty data (which we log at the debug level
+as the MSGQ_CLOSE_ON_RECV message), but for some (yet) unknown reason
+it can also be reported as the system error. At shutdown time it's expected
+that connections are closed, so it's probably safe to ignore these messages
+in such a case. We still log them as an error as we've not figured out
+how exactly that can happen. In future, we may make the shutdown process
+more robust so the msgq daemon can explicitly know when a client shuts down
+more reliably. If and when it's implemented this error message won't appear
+on shutdown unless there's really something unexpected.
% MSGQ_RECV_HDR Received header: %1
Debug message. This message includes the whole routing header of a packet.
-% MSGQ_INVALID_CMD Received invalid command: %1
-An unknown command listed in the log has been received. It is ignored. This
-indicates either a programmer error (eg. a typo in the command name) or
-incompatible version of a module and message queue daemon.
-
-% MSGQ_SEND_ERR Error while sending to socket %1: %2
+% MSGQ_SEND_ERROR Error while sending to socket %1: %2
There was a low-level error when sending data to a socket. The error is logged
and the corresponding socket is dropped.
% MSGQ_SHUTDOWN Stopping Msgq
Debug message. The message queue is shutting down.
+% MSGQ_SOCKET_REGISTERED Registered a socket descriptor %1 with lname %2
+Debug message. The msgq daemon accepted a session request on the
+shown descriptor of socket and assigned a unique identifier (lname)
+for the client on that socket.
+
% MSGQ_SOCK_CLOSE Closing socket fd %1
Debug message. Closing the mentioned socket.
diff --git a/src/bin/msgq/tests/msgq_test.py b/src/bin/msgq/tests/msgq_test.py
index 88bb022..1f04e84 100644
--- a/src/bin/msgq/tests/msgq_test.py
+++ b/src/bin/msgq/tests/msgq_test.py
@@ -1,3 +1,4 @@
+import msgq
from msgq import SubscriptionManager, MsgQ
import unittest
@@ -559,6 +560,178 @@ class ThreadTests(unittest.TestCase):
test_thread.join(60)
self.assertTrue(self.__result)
+class SocketTests(unittest.TestCase):
+ '''Test cases for micro behaviors related to socket operations.
+
+ Some cases are covered as part of other tests, but in this fixture
+ we check more details of specific method related to socket operation,
+ with the help of mock classes to avoid expensive overhead.
+
+ '''
+ class MockSocket():
+ '''A mock socket used instead of standard socket objects.'''
+ def __init__(self):
+ self.ex_on_send = None # raised from send() if not None
+ self.recv_result = b'test' # dummy data or exception
+ self.blockings = [] # history of setblocking() params
+ def setblocking(self, on):
+ self.blockings.append(on)
+ def send(self, data):
+ if self.ex_on_send is not None:
+ raise self.ex_on_send
+ return 10 # arbitrary choice
+ def recv(self, len):
+ if isinstance(self.recv_result, Exception):
+ raise self.recv_result
+ ret = self.recv_result
+ self.recv_result = b'' # if called again, return empty data
+ return ret
+ def fileno(self):
+ return 42 # arbitrary choice
+
+ class LoggerWrapper():
+ '''A simple wrapper of logger to inspect log messages.'''
+ def __init__(self, logger):
+ self.error_called = 0
+ self.warn_called = 0
+ self.debug_called = 0
+ self.orig_logger = logger
+ def error(self, *args):
+ self.error_called += 1
+ self.orig_logger.error(*args)
+ def warn(self, *args):
+ self.warn_called += 1
+ self.orig_logger.warn(*args)
+ def debug(self, *args):
+ self.debug_called += 1
+ self.orig_logger.debug(*args)
+
+ def mock_kill_socket(self, fileno, sock):
+ '''A replacement of MsgQ.kill_socket method for inspection.'''
+ self.__killed_socket = (fileno, sock)
+ if fileno in self.__msgq.sockets:
+ del self.__msgq.sockets[fileno]
+
+ def setUp(self):
+ self.__msgq = MsgQ()
+ self.__msgq.kill_socket = self.mock_kill_socket
+ self.__sock = self.MockSocket()
+ self.__data = b'dummy'
+ self.__msgq.sockets[42] = self.__sock
+ self.__msgq.sendbuffs[42] = (None, b'testdata')
+ self.__sock_error = socket.error()
+ self.__killed_socket = None
+ self.__logger = self.LoggerWrapper(msgq.logger)
+ msgq.logger = self.__logger
+
+ def tearDown(self):
+ msgq.logger = self.__logger.orig_logger
+
+ def test_send_data(self):
+ # Successful case: _send_data() returns the hardcoded value, and
+ # setblocking() is called twice with the expected parameters
+ self.assertEqual(10, self.__msgq._send_data(self.__sock, self.__data))
+ self.assertEqual([0, 1], self.__sock.blockings)
+ self.assertIsNone(self.__killed_socket)
+
+ def test_send_data_interrupt(self):
+ '''send() is interrupted. send_data() returns 0, sock isn't killed.'''
+ expected_blockings = []
+ for eno in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
+ self.__sock_error.errno = eno
+ self.__sock.ex_on_send = self.__sock_error
+ self.assertEqual(0, self.__msgq._send_data(self.__sock,
+ self.__data))
+ expected_blockings.extend([0, 1])
+ self.assertEqual(expected_blockings, self.__sock.blockings)
+ self.assertIsNone(self.__killed_socket)
+
+ def test_send_data_error(self):
+ '''Unexpected error happens on send(). The socket is killed.
+
+ If the error is EPIPE, it's logged at the warn level; otherwise
+ an error message is logged.
+
+ '''
+ expected_blockings = []
+ expected_errors = 0
+ expected_warns = 0
+ for eno in [errno.EPIPE, errno.ECONNRESET, errno.ENOBUFS]:
+ self.__sock_error.errno = eno
+ self.__sock.ex_on_send = self.__sock_error
+ self.__killed_socket = None # clear any previous value
+ self.assertEqual(None, self.__msgq._send_data(self.__sock,
+ self.__data))
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ expected_blockings.extend([0, 1])
+ self.assertEqual(expected_blockings, self.__sock.blockings)
+
+ if eno == errno.EPIPE:
+ expected_warns += 1
+ else:
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_warns, self.__logger.warn_called)
+
+ def test_process_fd_read_after_bad_write(self):
+ '''Check the specific case of write fail followed by read attempt.
+
+ The write failure results in kill_socket, then read shouldn't be tried.
+
+ '''
+ self.__sock_error.errno = errno.EPIPE
+ self.__sock.ex_on_send = self.__sock_error
+ self.__msgq.process_socket = None # if called, trigger an exception
+ self.__msgq._process_fd(42, True, True, False) # shouldn't crash
+
+ # check the socket is deleted from the fileno=>sock dictionary
+ self.assertEqual({}, self.__msgq.sockets)
+
+ def test_process_fd_close_after_bad_write(self):
+ '''Similar to the previous, but for checking dup'ed kill attempt'''
+ self.__sock_error.errno = errno.EPIPE
+ self.__sock.ex_on_send = self.__sock_error
+ self.__msgq._process_fd(42, True, False, True) # shouldn't crash
+ self.assertEqual({}, self.__msgq.sockets)
+
+ def test_process_fd_writer_after_close(self):
+ '''Emulate a "writable" socket has been already closed and killed.'''
+ # This just shouldn't crash
+ self.__msgq._process_fd(4200, True, False, False)
+
+ def test_process_packet(self):
+ '''Check some failure cases in handling an incoming message.'''
+ expected_errors = 0
+ expected_debugs = 0
+
+ # if socket.recv() fails due to socket.error, it will be logged
+ # as error and the socket will be killed regardless of errno.
+ for eno in [errno.ENOBUFS, errno.ECONNRESET]:
+ self.__sock_error.errno = eno
+ self.__sock.recv_result = self.__sock_error
+ self.__killed_socket = None # clear any previous value
+ self.__msgq.process_packet(42, self.__sock)
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_debugs, self.__logger.debug_called)
+
+ # if socket.recv() returns empty data, the result depends on whether
+ # there's any preceding data; in the second case below, at least
+ # 6 bytes of data will be expected, and the second call to our faked
+ # recv() returns empty data. In that case it will be logged as error.
+ for recv_data in [b'', b'short']:
+ self.__sock.recv_result = recv_data
+ self.__killed_socket = None
+ self.__msgq.process_packet(42, self.__sock)
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ if len(recv_data) == 0:
+ expected_debugs += 1
+ else:
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_debugs, self.__logger.debug_called)
+
if __name__ == '__main__':
isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index 485d022..ae73c3d 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -52,8 +52,8 @@
<title>DESCRIPTION</title>
<para>The <command>b10-resolver</command> daemon provides the BIND 10
recursive DNS server. Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -205,7 +205,7 @@ once that is merged you can for instance do 'config add Resolver/forward_address
<command>shutdown</command> exits <command>b10-resolver</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 9536608..a3de340 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -431,13 +431,14 @@ Resolver::processMessage(const IOMessage& io_message,
// Ignore all responses.
if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_UNEXPECTED_RESPONSE);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
- .arg(ex.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_HEADER_PROCESSING_FAILED).arg(ex.what());
server->resume(false);
return;
}
@@ -446,14 +447,16 @@ Resolver::processMessage(const IOMessage& io_message,
try {
query_message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_PROTOCOL_BODY_PARSE_FAILED)
.arg(error.what()).arg(error.getRcode());
makeErrorMessage(query_message, answer_message,
buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_MESSAGE_PROCESSING_FAILED)
.arg(ex.what()).arg(Rcode::SERVFAIL());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::SERVFAIL());
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
index 214519b..c722af1 100644
--- a/src/bin/resolver/resolver_messages.mes
+++ b/src/bin/resolver/resolver_messages.mes
@@ -81,7 +81,7 @@ has passed a set of checks (message is well-formed, it is allowed by the
ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
-% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+% RESOLVER_HEADER_PROCESSING_FAILED message received, exception when processing header: %1
This is a debug message from the resolver noting that an exception
occurred during the processing of a received packet. The packet has
been dropped.
@@ -97,7 +97,7 @@ During the update of the resolver's configuration parameters, the value
of the lookup timeout was found to be too small. The configuration
update will not be applied.
-% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+% RESOLVER_MESSAGE_PROCESSING_FAILED error parsing received message: %1 - returning %2
This is a debug message noting that parsing of the body of a received
message by the resolver failed due to some error (although the parsing of
the header succeeded). The message parameters give a textual description
@@ -135,18 +135,11 @@ A warning message issued during resolver startup, this indicates that
no root addresses have been set. This may be because the resolver will
get them from a priming query.
-% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
-This is a debug message noting that the resolver received a message and
-the parsing of the body of the message failed due to some non-protocol
-related reason (although the parsing of the header succeeded).
-The message parameters give a textual description of the problem and
-the RCODE returned.
-
% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
This debug message is logged when a "print_message" command is received
by the resolver over the command channel.
-% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+% RESOLVER_PROTOCOL_BODY_PARSE_FAILED protocol error parsing received message: %1 - returning %2
This is a debug message noting that the resolver received a message and
the parsing of the body of the message failed due to some protocol error
(although the parsing of the header succeeded). The message parameters
diff --git a/src/bin/sockcreator/tests/sockcreator_tests.cc b/src/bin/sockcreator/tests/sockcreator_tests.cc
index 9604567..b834e1c 100644
--- a/src/bin/sockcreator/tests/sockcreator_tests.cc
+++ b/src/bin/sockcreator/tests/sockcreator_tests.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <config.h>
+
#include "../sockcreator.h"
#include <util/unittests/fork.h>
@@ -195,7 +197,12 @@ TEST(get_sock, tcp4_create) {
testAnyCreate<sockaddr_in>(SOCK_STREAM, tcpCheck);
}
-TEST(get_sock, udp6_create) {
+#ifdef HAVE_BROKEN_GET_IPV6_USE_MIN_MTU
+TEST(get_sock, DISABLED_udp6_create)
+#else
+TEST(get_sock, udp6_create)
+#endif
+{
testAnyCreate<sockaddr_in6>(SOCK_DGRAM, udpCheck<sockaddr_in6>);
}
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 28d6ac9..be91737 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -54,7 +54,7 @@
intended for HTTP/XML interface for statistics module. This server
process runs as a process separated from the process of the BIND 10 Stats
daemon (<command>b10-stats</command>). The server is initially executed
- by the BIND 10 boss process (<command>bind10</command>) and eventually
+ by the b10-init process and eventually
exited by it. The server is intended to serve requests by HTTP
clients like web browsers and third-party modules. When the server is
asked, it requests BIND 10 statistics data or its schema from
@@ -74,7 +74,7 @@
10 statistics. The server uses CC session in communication
with <command>b10-stats</command>. CC session is provided
by <command>b10-msgq</command> which is started
- by <command>bind10</command> in advance. The server is implemented by
+ by <command>b10-init</command> in advance. The server is implemented by
HTTP-server libraries included in Python 3. The server obtains the
configuration from the config manager (<command>b10-cfgmgr</command>) in
runtime. Please see below for more details about this spec file and
@@ -176,7 +176,7 @@
exits the <command>b10-stats-httpd</command> process.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
</listitem>
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index ee89ad2..bbdb96e 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -56,18 +56,18 @@
from each BIND 10 module. Its statistics information may be
reported via <command>bindctl</command> or
<command>b10-stats-httpd</command>. It is started by
- <command>bind10</command> and communicates by using the
+ <command>b10-init</command> and communicates by using the
Command Channel by <command>b10-msgq</command> with other
- modules like <command>bind10</command>, <command>b10-auth</command>
+ modules like <command>b10-init</command>, <command>b10-auth</command>
and so on. <command>b10-stats</command> periodically requests statistics
data from each module. The interval time can be configured
via <command>bindctl</command>. <command>b10-stats</command> cannot
accept any command from other modules for updating statistics data. The
stats module collects data and
aggregates it. <command>b10-stats</command> invokes an internal
- command for <command>bind10</command> after its initial
+ command for <command>b10-init</command> after its initial
starting to make sure it collects statistics data from
- <command>bind10</command>.
+ <command>b10-init</command>.
</para>
</refsect1>
@@ -131,7 +131,7 @@
<command>b10-stats</command> process.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index 7123c53..0af0933 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -26,7 +26,8 @@ from optparse import OptionParser, OptionValueError
import errno
import select
-import isc
+import isc.cc
+import isc.config
import isc.util.process
import isc.log
from isc.log_messages.stats_messages import *
@@ -249,17 +250,17 @@ class Stats:
# It counts the number of instances of same module by
# examining the third value from the array result of
- # 'show_processes' of Boss
+ # 'show_processes' of Init
seq = self.cc_session.group_sendmsg(
isc.config.ccsession.create_command("show_processes"),
- 'Boss')
+ 'Init')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
modules = []
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
if rcode == 0 and type(value) is list:
# NOTE: For example, the "show_processes" command
- # of Boss is assumed to return the response in this
+ # of Init is assumed to return the response in this
# format:
# [
# ...
@@ -696,3 +697,5 @@ if __name__ == "__main__":
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATS_STOPPED_BY_KEYBOARD)
+
+ logger.info(STATS_EXITING)
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 6fbf7bb..dab10a2 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -92,7 +92,7 @@
"item_optional": false,
"item_default": "1970-01-01T00:00:00Z",
"item_title": "Last update time",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or b10-init process and so on",
"item_format": "date-time"
},
{
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100644
new mode 100755
index 736029b..367f56e
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -130,9 +130,9 @@ def item_name_list(element, identifier):
return ret
class HttpHandler(http.server.BaseHTTPRequestHandler):
- """HTTP handler class for HttpServer class. The class inhrits the super
- class http.server.BaseHTTPRequestHandler. It implemets do_GET()
- and do_HEAD() and orverrides log_message()"""
+ """HTTP handler class for HttpServer class. The class inherits the super
+ class http.server.BaseHTTPRequestHandler. It implements do_GET()
+ and do_HEAD() and overrides log_message()"""
def do_GET(self):
body = self.send_head()
if body is not None:
@@ -413,7 +413,7 @@ class StatsHttpd:
try:
self.open_httpd()
except HttpServerError as err:
- logger.error(STATSHTTPD_SERVER_ERROR, err)
+ logger.error(STATSHTTPD_SERVER_INIT_ERROR, err)
# restore old config
self.load_config(old_config)
self.open_httpd()
@@ -631,3 +631,5 @@ if __name__ == "__main__":
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATSHTTPD_STOPPED_BY_KEYBOARD)
+
+ logger.info(STATSHTTPD_EXITING)
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index 62fbd45..93491b6 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -32,10 +32,19 @@ address and port number.
Debug message indicating that the stats-httpd module is disconnecting
from the command and control bus.
+% STATSHTTPD_EXITING exiting
+The stats HTTP server is exiting.
+
% STATSHTTPD_HANDLE_CONFIG reading configuration: %1
The stats-httpd daemon has received new configuration data and will now
process it. The (changed) data is printed.
+% STATSHTTPD_HTTPLOG %1 %2
+Debug HTTP log message. These are the messages logged by the http server
+instance. For most logs, the message shows HTTP client and query
+information like HTTP method, URI, and status code, but the http server
+can also log other information, such as extended status reports.
+
% STATSHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
A shutdown command was sent to the stats-httpd module, and it will
now shut down.
@@ -59,7 +68,7 @@ corresponding to the requested URI is incorrect.
An internal error occurred while handling an HTTP request. An HTTP 500
response will be sent back, and the specific error is printed. This
is an error condition that likely points to a module that is not
-responding correctly to statistic requests.
+responding correctly to statistics requests.
% STATSHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
There was a problem initializing the HTTP server in the stats-httpd
@@ -96,9 +105,3 @@ configuration is unknown. The new configuration is ignored, and an
error is sent back. As possible cause is that there was an upgrade
problem, and the stats-httpd version is out of sync with the rest of
the system.
-
-% STATSHTTPD_HTTPLOG %1 %2
-Debug HTTP log message. These are the messages logged by the http server
-instance. For most logs, the message shows HTTP client and query
-information like HTTP method, URI, and status code, but the http server
-can also log other information, such as extended status reports.
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 3960c26..b6f0b16 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -24,6 +24,9 @@ The stats module was unable to connect to the BIND 10 command and
control bus. A likely problem is that the message bus daemon
(b10-msgq) is not running. The stats module will now shut down.
+% STATS_EXITING exiting
+The stats module process is exiting.
+
% STATS_RECEIVED_INVALID_STATISTICS_DATA received invalid statistics data from %1
Invalid statistics data has been received from the module while
polling and it has been discarded.
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index fb0510a..98689c8 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -68,7 +68,7 @@ XMLNS_XSD = "http://www.w3.org/2001/XMLSchema"
XMLNS_XSI = stats_httpd.XMLNS_XSI
DUMMY_DATA = {
- 'Boss' : {
+ 'Init' : {
"boot_time": time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME)
},
'Auth' : {
@@ -278,7 +278,7 @@ class TestHttpHandler(unittest.TestCase):
+ stats_httpd.XSD_URL_PATH)
# check the path of XSL
self.assertTrue(xsl_doctype.startswith(
- '<?xml-stylesheet type="text/xsl" href="' +
+ '<?xml-stylesheet type="text/xsl" href="' +
stats_httpd.XSL_URL_PATH
+ '"?>'))
# check whether the list of 'identifier' attributes in
@@ -397,7 +397,7 @@ class TestHttpHandler(unittest.TestCase):
# 404 NotFound (too long path)
self.client._http_vsn_str = 'HTTP/1.0'
- self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Init/boot_time/a')
self.client.endheaders()
response = self.client.getresponse()
self.assertEqual(response.status, 404)
@@ -1001,7 +1001,7 @@ class TestStatsHttpd(unittest.TestCase):
self.assertFalse('item_format' in spec)
self.assertFalse('format' in stats_xml[i].attrib)
- @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
+ @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
def test_xsd_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
xsd_string = self.stats_httpd.xsd_handler()
@@ -1036,7 +1036,7 @@ class TestStatsHttpd(unittest.TestCase):
self.assertEqual(attribs[i][1], stats_xsd[i].attrib['type'])
self.assertEqual(attribs[i][2], stats_xsd[i].attrib['use'])
- @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
+ @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
def test_xsl_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
xsl_string = self.stats_httpd.xsl_handler()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index d18abf1..80bd3a6 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -319,15 +319,15 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
@@ -335,13 +335,13 @@ class TestStats(unittest.TestCase):
(rcode, value) = send_command('show', 'Stats')
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Stats' in value)
self.assertTrue('Auth' in value)
self.assertEqual(len(value['Stats']), 5)
- self.assertEqual(len(value['Boss']), 1)
- self.assertTrue('boot_time' in value['Boss'])
- self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertEqual(len(value['Init']), 1)
+ self.assertTrue('boot_time' in value['Init'])
+ self.assertEqual(value['Init']['boot_time'], self.const_datetime)
self.assertTrue('report_time' in value['Stats'])
self.assertTrue('boot_time' in value['Stats'])
self.assertTrue('last_update_time' in value['Stats'])
@@ -350,12 +350,12 @@ class TestStats(unittest.TestCase):
(rcode, value) = send_command('showschema', 'Stats')
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Stats' in value)
self.assertTrue('Auth' in value)
self.assertEqual(len(value['Stats']), 5)
- self.assertEqual(len(value['Boss']), 1)
- for item in value['Boss']:
+ self.assertEqual(len(value['Init']), 1)
+ for item in value['Init']:
self.assertTrue(len(item) == 7)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
@@ -383,10 +383,10 @@ class TestStats(unittest.TestCase):
def test_update_modules(self):
self.stats = stats.Stats()
- self.assertEqual(len(self.stats.modules), 3) # Auth, Boss, Stats
+ self.assertEqual(len(self.stats.modules), 3) # Auth, Init, Stats
self.stats.update_modules()
self.assertTrue('Stats' in self.stats.modules)
- self.assertTrue('Boss' in self.stats.modules)
+ self.assertTrue('Init' in self.stats.modules)
self.assertFalse('Dummy' in self.stats.modules)
my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
self.assertTrue('report_time' in my_statistics_data)
@@ -399,7 +399,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
self.assertEqual(my_statistics_data['timestamp'], 0.0)
self.assertEqual(my_statistics_data['lname'], "")
- my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Init'].get_statistics_spec())
self.assertTrue('boot_time' in my_statistics_data)
self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
orig_parse_answer = stats.isc.config.ccsession.parse_answer
@@ -411,8 +411,8 @@ class TestStats(unittest.TestCase):
self.stats = stats.Stats()
my_statistics_data = self.stats.get_statistics_data()
self.assertTrue('Stats' in my_statistics_data)
- self.assertTrue('Boss' in my_statistics_data)
- self.assertTrue('boot_time' in my_statistics_data['Boss'])
+ self.assertTrue('Init' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data['Init'])
my_statistics_data = self.stats.get_statistics_data(owner='Stats')
self.assertTrue('Stats' in my_statistics_data)
self.assertTrue('report_time' in my_statistics_data['Stats'])
@@ -601,7 +601,7 @@ class TestStats(unittest.TestCase):
['foo1']['nds_queries.perzone'],\
_new_val)
self.assertEqual(self.stats.update_statistics_data(
- 'Boss', 'bar1', _test_exp7), ["KeyError: 'foo'"])
+ 'Init', 'bar1', _test_exp7), ["KeyError: 'foo'"])
self.assertEqual(self.stats.update_statistics_data(
'Foo', 'foo1', _test_exp6), ['unknown module name: Foo'])
@@ -659,7 +659,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(self.stats.statistics_data['Auth']['queries.udp'], sum_qudp)
self.assertTrue('Auth' in self.stats.statistics_data_bymid)
# restore statistics data of killed auth
- # self.base.boss.server.pid_list = [ killed ] + self.base.boss.server.pid_list[:]
+ # self.base.b10_init.server.pid_list = [ killed ] + self.base.b10_init.server.pid_list[:]
self.stats.update_statistics_data('Auth',
"bar1 at foo",
{'queries.tcp': bar1_tcp})
@@ -717,9 +717,9 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
stats_server.shutdown()
def test_commands(self):
@@ -833,7 +833,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
self.assertTrue('Stats' in value)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Auth' in value)
self.assertFalse('__Dummy__' in value)
schema = value['Stats']
@@ -849,7 +849,7 @@ class TestStats(unittest.TestCase):
if len(item) == 7:
self.assertTrue('item_format' in item)
- schema = value['Boss']
+ schema = value['Init']
self.assertEqual(len(schema), 1)
for item in schema:
self.assertTrue(len(item) == 7)
@@ -879,7 +879,7 @@ class TestStats(unittest.TestCase):
self.stats.command_showschema(owner='Stats'))
self.assertEqual(rcode, 0)
self.assertTrue('Stats' in value)
- self.assertFalse('Boss' in value)
+ self.assertFalse('Init' in value)
self.assertFalse('Auth' in value)
for item in value['Stats']:
self.assertTrue(len(item) == 6 or len(item) == 7)
@@ -896,7 +896,7 @@ class TestStats(unittest.TestCase):
self.stats.command_showschema(owner='Stats', name='report_time'))
self.assertEqual(rcode, 0)
self.assertTrue('Stats' in value)
- self.assertFalse('Boss' in value)
+ self.assertFalse('Init' in value)
self.assertFalse('Auth' in value)
self.assertEqual(len(value['Stats'][0]), 7)
self.assertTrue('item_name' in value['Stats'][0])
@@ -1150,15 +1150,15 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command('show', 'Stats'),
(0, stat.statistics_data))
- # check statistics data of 'Boss'
- boss = self.base.boss.server
+ # check statistics data of 'Init'
+ b10_init = self.base.b10_init.server
self.assertEqual(
- stat.statistics_data_bymid['Boss'][boss.cc_session.lname],
+ stat.statistics_data_bymid['Init'][b10_init.cc_session.lname],
{'boot_time': self.const_datetime})
self.assertEqual(
- len(stat.statistics_data_bymid['Boss']), 1)
+ len(stat.statistics_data_bymid['Init']), 1)
self.assertEqual(
- stat.statistics_data['Boss'],
+ stat.statistics_data['Init'],
{'boot_time': self.const_datetime})
# check statistics data of each 'Auth' instances
list_auth = ['', '2']
@@ -1219,17 +1219,17 @@ class TestStats(unittest.TestCase):
def test_polling2(self):
# set invalid statistics
- boss = self.base.boss.server
- boss.statistics_data = {'boot_time':1}
+ b10_init = self.base.b10_init.server
+ b10_init.statistics_data = {'boot_time':1}
stats_server = ThreadingServerManager(MyStats)
stat = stats_server.server
stats_server.run()
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
- # check default statistics data of 'Boss'
+ # check default statistics data of 'Init'
self.assertEqual(
- stat.statistics_data['Boss'],
+ stat.statistics_data['Init'],
{'boot_time': self.const_default_datetime})
stats_server.shutdown()
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
index 5c1855a..1c5cc3c 100644
--- a/src/bin/stats/tests/test_utils.py
+++ b/src/bin/stats/tests/test_utils.py
@@ -140,11 +140,11 @@ class MockCfgmgr:
def shutdown(self):
self.cfgmgr.running = False
-class MockBoss:
+class MockInit:
spec_str = """\
{
"module_spec": {
- "module_name": "Boss",
+ "module_name": "Init",
"module_description": "Mock Master process",
"config_data": [
{
@@ -210,7 +210,7 @@ class MockBoss:
},
{
"command_name": "ping",
- "command_description": "Ping the boss process",
+ "command_description": "Ping the b10-init process",
"command_args": []
},
{
@@ -537,9 +537,9 @@ class BaseModules:
# MockCfgmgr
self.cfgmgr = ThreadingServerManager(MockCfgmgr)
self.cfgmgr.run()
- # MockBoss
- self.boss = ThreadingServerManager(MockBoss)
- self.boss.run()
+ # MockInit
+ self.b10_init = ThreadingServerManager(MockInit)
+ self.b10_init.run()
# MockAuth
self.auth = ThreadingServerManager(MockAuth)
self.auth.run()
@@ -558,8 +558,8 @@ class BaseModules:
# MockAuth
self.auth2.shutdown(True)
self.auth.shutdown(True)
- # MockBoss
- self.boss.shutdown(True)
+ # MockInit
+ self.b10_init.shutdown(True)
# MockCfgmgr
self.cfgmgr.shutdown(True)
# remove the unused socket file
diff --git a/src/bin/stats/tests/testdata/b10-config.db b/src/bin/stats/tests/testdata/b10-config.db
index 2f89b98..7dd9daf 100644
--- a/src/bin/stats/tests/testdata/b10-config.db
+++ b/src/bin/stats/tests/testdata/b10-config.db
@@ -1,5 +1,5 @@
{ "version": 2,
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {
"kind": "needed",
diff --git a/src/bin/sysinfo/run_sysinfo.sh.in b/src/bin/sysinfo/run_sysinfo.sh.in
index 6459c2d..b5593b9 100755
--- a/src/bin/sysinfo/run_sysinfo.sh.in
+++ b/src/bin/sysinfo/run_sysinfo.sh.in
@@ -20,20 +20,8 @@ export PYTHON_EXEC
SYSINFO_PATH=@abs_top_builddir@/src/bin/sysinfo
-# Note: we shouldn't need log_messages and lib/dns except for the seemingly
-# necessary dependency due to the automatic import in the isc package (its
-# __init__.py imports some other modules)
-# #2145 should eliminate the need for them.
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/dns/python/.libs
+PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
-# Likewise, we need only because isc.log requires some loadable modules.
-# sysinfo itself shouldn't need any of them.
-SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
-if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
- export @ENV_LIBRARY_PATH@
-fi
-
cd ${SYSINFO_PATH}
exec ${PYTHON_EXEC} -O sysinfo.py "$@"
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 055ebdc..ea8ad87 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -42,7 +42,7 @@ class TestRename(unittest.TestCase):
# Scripts named in this list are not expected to be renamed and
# should be excluded from the scan.
- EXCLUDED_SCRIPTS = ['isc-sysinfo']
+ EXCLUDED_SCRIPTS = ['isc-sysinfo', 'bind10']
# Regexp to find all the *_SCRIPTS = something lines (except for
# noinst_SCRIPTS, which are scripts for tests), including line
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index eb16ab3..0f3e99c 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -56,8 +56,8 @@
<para>The <command>b10-xfrin</command> daemon provides the BIND 10
incoming DNS zone transfer service.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
When triggered it can request and receive a zone transfer and store
the zone in a BIND 10 zone data source.
</para>
@@ -180,8 +180,8 @@ in separate zonemgr process.
<para>
<command>shutdown</command> stops all incoming zone transfers
- and exits <command>b10-xfrin</command>. (Note that the BIND 10
- boss process will restart this service.)
+ and exits <command>b10-xfrin</command>. (Note that the
+ b10-init process will restart this service.)
</para>
<!-- TODO:
add a usage example of xfrin -->
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 2370708..a1714de 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -40,7 +40,7 @@ import sqlite3
#
TEST_ZONE_NAME_STR = "example.com."
TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_RRCLASS_STR = 'IN'
TEST_DB_FILE = 'db_file'
TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
@@ -59,21 +59,21 @@ TEST_MASTER_PORT = '53535'
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
# SOA intended to be used for the new SOA as a result of transfer.
-soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'master.example.com. admin.example.com. ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA, RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
# SOA intended to be used for the current SOA at the secondary side.
# Note that its serial is smaller than that of soa_rdata.
-begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+begin_soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'master.example.com. admin.example.com. ' +
'1230 3600 1800 2419200 7200')
-begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA, RRTTL(3600))
begin_soa_rrset.add_rdata(begin_soa_rdata)
-example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
-example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA)
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
@@ -208,12 +208,12 @@ class MockDataSourceClient():
zone names.
'''
- if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+ if name == TEST_ZONE_NAME and rrtype == RRType.SOA:
return (ZoneFinder.SUCCESS, begin_soa_rrset, 0)
if name == Name('no-soa.example'):
return (ZoneFinder.NXDOMAIN, None, 0)
if name == Name('dup-soa.example'):
- dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+ dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA, RRTTL(0))
dup_soa_rrset.add_rdata(begin_soa_rdata)
dup_soa_rrset.add_rdata(soa_rdata)
return (ZoneFinder.SUCCESS, dup_soa_rrset, 0)
@@ -329,7 +329,7 @@ class MockXfrinConnection(XfrinConnection):
return len(data)
def create_response_data(self, response=True, auth=True, bad_qid=False,
- rcode=Rcode.NOERROR(),
+ rcode=Rcode.NOERROR,
questions=default_questions,
answers=default_answers,
authorities=[],
@@ -339,7 +339,7 @@ class MockXfrinConnection(XfrinConnection):
if bad_qid:
qid += 1
resp.set_qid(qid)
- resp.set_opcode(Opcode.QUERY())
+ resp.set_opcode(Opcode.QUERY)
resp.set_rcode(rcode)
if response:
resp.set_header_flag(Message.HEADERFLAG_QR)
@@ -366,17 +366,17 @@ class TestXfrinState(unittest.TestCase):
TEST_RRCLASS, None, threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
self.conn.init_socket()
- self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
- self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+ self.begin_soa.add_rdata(Rdata(RRType.SOA, TEST_RRCLASS,
'm. r. 1230 0 0 0 0'))
- self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS,
RRTTL(3600))
- self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
- 'ns.example.com'))
- self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+ self.ns_rrset.add_rdata(Rdata(RRType.NS, TEST_RRCLASS,
+ 'ns.example.com.'))
+ self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A,
RRTTL(3600))
- self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+ self.a_rrset.add_rdata(Rdata(RRType.A, TEST_RRCLASS, '192.0.2.1'))
self.conn._datasrc_client = MockDataSourceClient()
self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
@@ -408,14 +408,14 @@ class TestXfrinInitialSOA(TestXfrinState):
self.ns_rrset)
def test_handle_ixfr_uptodate(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinIXFRUptodate()),
type(self.conn.get_xfrstate()))
def test_handle_ixfr_uptodate2(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinIXFRUptodate()),
@@ -424,7 +424,7 @@ class TestXfrinInitialSOA(TestXfrinState):
def test_handle_ixfr_uptodate3(self):
# Similar to the previous case, but checking serial number arithmetic
# comparison
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(0xffffffff)
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinFirstData()),
@@ -432,7 +432,7 @@ class TestXfrinInitialSOA(TestXfrinState):
def test_handle_axfr_uptodate(self):
# "request serial" should matter only for IXFR
- self.conn._request_type = RRType.AXFR()
+ self.conn._request_type = RRType.AXFR
self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinFirstData()),
@@ -445,13 +445,13 @@ class TestXfrinFirstData(TestXfrinState):
def setUp(self):
super().setUp()
self.state = XfrinFirstData()
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
# arbitrary chosen serial < 1234:
self.conn._request_serial = isc.dns.Serial(1230)
self.conn._diff = None # should be replaced in the AXFR case
def test_handle_ixfr_begin_soa(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
self.assertEqual(type(XfrinIXFRDeleteSOA()),
type(self.conn.get_xfrstate()))
@@ -459,7 +459,7 @@ class TestXfrinFirstData(TestXfrinState):
def test_handle_axfr(self):
# If the original type is AXFR, other conditions aren't considered,
# and AXFR processing will continue
- self.conn._request_type = RRType.AXFR()
+ self.conn._request_type = RRType.AXFR
self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
@@ -598,9 +598,9 @@ class TestXfrinIXFRAdd(TestXfrinState):
# First, push a starting SOA inside. This should be OK, nothing checked
# yet.
self.state.handle_rr(self.conn, self.begin_soa)
- end_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+ end_soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'm. r. 1234 0 0 0 0')
- end_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ end_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
end_soa_rrset.add_rdata(end_soa_rdata)
# This would try to finish up. But the TSIG pretends not everything is
@@ -724,7 +724,7 @@ class TestXfrinConnection(unittest.TestCase):
'bad_qid': False,
'response': True,
'auth': True,
- 'rcode': Rcode.NOERROR(),
+ 'rcode': Rcode.NOERROR,
'answers': default_answers,
'authorities': [],
'tsig': False,
@@ -817,21 +817,21 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.reply_data += bogus_data
def _create_a(self, address):
- rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+ rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A,
RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+ rrset.add_rdata(Rdata(RRType.A, TEST_RRCLASS, address))
return rrset
def _create_soa(self, serial):
- rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
- rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+ rrset.add_rdata(Rdata(RRType.SOA, TEST_RRCLASS, rdata_str))
return rrset
def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
- rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS, RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.NS, TEST_RRCLASS, nsname))
return rrset
def _set_test_zone(self, zone_name):
@@ -899,19 +899,19 @@ class TestAXFR(TestXfrinConnection):
c.close()
def test_init_chclass(self):
- c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(), None,
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH, None,
threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
c.init_socket()
- axfrmsg = c._create_query(RRType.AXFR())
+ axfrmsg = c._create_query(RRType.AXFR)
self.assertEqual(axfrmsg.get_question()[0].get_class(),
- RRClass.CH())
+ RRClass.CH)
c.close()
def test_create_query(self):
def check_query(expected_qtype, expected_auth):
'''Helper method to repeat the same pattern of tests'''
- self.assertEqual(Opcode.QUERY(), msg.get_opcode())
- self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+ self.assertEqual(Opcode.QUERY, msg.get_opcode())
+ self.assertEqual(Rcode.NOERROR, msg.get_rcode())
self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
@@ -936,16 +936,16 @@ class TestAXFR(TestXfrinConnection):
# Actual tests start here
# SOA query
- msg = self.conn._create_query(RRType.SOA())
- check_query(RRType.SOA(), None)
+ msg = self.conn._create_query(RRType.SOA)
+ check_query(RRType.SOA, None)
# AXFR query
- msg = self.conn._create_query(RRType.AXFR())
- check_query(RRType.AXFR(), None)
+ msg = self.conn._create_query(RRType.AXFR)
+ check_query(RRType.AXFR, None)
# IXFR query
- msg = self.conn._create_query(RRType.IXFR())
- check_query(RRType.IXFR(), begin_soa_rrset)
+ msg = self.conn._create_query(RRType.IXFR)
+ check_query(RRType.IXFR, begin_soa_rrset)
self.assertEqual(1230, self.conn._request_serial.get_value())
def test_create_ixfr_query_fail(self):
@@ -954,20 +954,20 @@ class TestAXFR(TestXfrinConnection):
self._set_test_zone(Name('no-such-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('partial-match-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('no-soa.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('dup-soa.example'))
self.conn._zone_soa = self.conn._get_zone_soa()
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
def test_send_query(self):
def message_has_tsig(data):
@@ -980,11 +980,11 @@ class TestAXFR(TestXfrinConnection):
# soa request with tsig
self.conn._tsig_key = TSIG_KEY
- self.conn._send_query(RRType.SOA())
+ self.conn._send_query(RRType.SOA)
self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
# axfr request with tsig
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
def test_response_with_invalid_msg(self):
@@ -995,14 +995,14 @@ class TestAXFR(TestXfrinConnection):
def test_response_with_tsigfail(self):
self.conn._tsig_key = TSIG_KEY
# server tsig check fail, return with RCODE 9 (NOTAUTH)
- self.conn._send_query(RRType.SOA())
+ self.conn._send_query(RRType.SOA)
self.conn.reply_data = \
- self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+ self.conn.create_response_data(rcode=Rcode.NOTAUTH)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_without_end_soa(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data()
# This should result in timeout in the asyncore loop. We emulate
# that situation in recv() by emptying the reply data buffer.
@@ -1010,7 +1010,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._handle_xfrin_responses)
def test_response_bad_qid(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1019,9 +1019,9 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- rcode=Rcode.SERVFAIL())
+ rcode=Rcode.SERVFAIL)
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
self.__match_exception(XfrinProtocolError,
@@ -1032,7 +1032,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_KEY)
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
@@ -1041,26 +1041,26 @@ class TestAXFR(TestXfrinConnection):
self.conn._handle_xfrin_responses)
def test_response_non_response(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(response=False)
self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_error_code(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- rcode=Rcode.SERVFAIL())
+ rcode=Rcode.SERVFAIL)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_multi_question(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
questions=[example_axfr_question, example_axfr_question])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_non_response(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(response = False)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1098,7 +1098,7 @@ class TestAXFR(TestXfrinConnection):
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_error_code(self):
- self.soa_response_params['rcode'] = Rcode.SERVFAIL()
+ self.soa_response_params['rcode'] = Rcode.SERVFAIL
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1146,21 +1146,21 @@ class TestAXFR(TestXfrinConnection):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(Name('example.org'),
TEST_RRCLASS,
- RRType.SOA())]
+ RRType.SOA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_question_class_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
- RRClass.CH(),
- RRType.SOA())]
+ RRClass.CH,
+ RRType.SOA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_question_type_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
TEST_RRCLASS,
- RRType.AAAA())]
+ RRType.AAAA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_no_soa(self):
@@ -1178,8 +1178,8 @@ class TestAXFR(TestXfrinConnection):
def test_soacheck_soa_class_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
- soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+ soa = RRset(TEST_ZONE_NAME, RRClass.CH, RRType.SOA, RRTTL(0))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.CH, 'm. r. 1234 0 0 0 0'))
self.soa_response_params['answers'] = [soa]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1197,7 +1197,7 @@ class TestAXFR(TestXfrinConnection):
def test_soacheck_referral_response(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['answers'] = []
- self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+ self.soa_response_params['authorities'] = [create_ns('ns.example.com.')]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_nodata_response(self):
@@ -1220,7 +1220,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
- self.soa_response_params['rcode'] = Rcode.NOTAUTH()
+ self.soa_response_params['rcode'] = Rcode.NOTAUTH
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1257,7 +1257,7 @@ class TestAXFR(TestXfrinConnection):
def test_response_shutdown(self):
self.conn.response_generator = self._create_normal_response_data
self.conn._shutdown_event.set()
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_timeout(self):
@@ -1272,13 +1272,13 @@ class TestAXFR(TestXfrinConnection):
def test_response_bad_message(self):
self.conn.response_generator = self._create_broken_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertRaises(Exception, self.conn._handle_xfrin_responses)
def test_axfr_response(self):
# A simple normal case: AXFR consists of SOA, NS, then trailing SOA.
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1299,7 +1299,7 @@ class TestAXFR(TestXfrinConnection):
self._create_ns(),
soa_rrset]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1314,10 +1314,9 @@ class TestAXFR(TestXfrinConnection):
'''
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)],
# begin serial=1230, end serial=1234. end will be used.
answers=[begin_soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
@@ -1331,10 +1330,9 @@ class TestAXFR(TestXfrinConnection):
Test we reject a zone transfer if it fails the check_zone validation.
"""
a_rr = self._create_a('192.0.2.1')
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)],
# begin serial=1230, end serial=1234. end will be used.
answers=[begin_soa_rrset, a_rr, soa_rrset])
# Make it fail the validation
@@ -1361,10 +1359,10 @@ class TestAXFR(TestXfrinConnection):
'''
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ RRType.AXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1378,9 +1376,9 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = \
- [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR())]
+ [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1394,9 +1392,9 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = \
- [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.AXFR())]
+ [Question(TEST_ZONE_NAME, RRClass.CH, RRType.AXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1411,9 +1409,9 @@ class TestAXFR(TestXfrinConnection):
'''
# returning IXFR in question to AXFR query
self.axfr_response_params['question_1st'] = \
- [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.IXFR())]
+ [Question(TEST_ZONE_NAME, RRClass.CH, RRType.IXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1428,7 +1426,7 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = []
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1617,7 +1615,7 @@ class TestIXFRResponse(TestXfrinConnection):
super().setUp()
self.conn._query_id = self.conn.qid = 1035
self.conn._request_serial = isc.dns.Serial(1230)
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._datasrc_client = MockDataSourceClient()
XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
@@ -1631,7 +1629,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1646,7 +1644,7 @@ class TestIXFRResponse(TestXfrinConnection):
An IXFR that fails validation later on. Check it is rejected.
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self._check_zone_result = False
self.assertRaises(XfrinZoneError, self.conn._handle_xfrin_responses)
@@ -1666,7 +1664,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset,
# removing one A in serial 1230
begin_soa_rrset, self._create_a('192.0.2.1'),
@@ -1706,10 +1704,10 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset])
self.conn.reply_data += self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1720,7 +1718,7 @@ class TestIXFRResponse(TestXfrinConnection):
def test_ixfr_response_uptodate(self):
'''IXFR response indicates the zone is new enough'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[begin_soa_rrset])
self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
# no diffs should have been committed
@@ -1733,7 +1731,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
# SOA sequence is out-of-sync
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.assertRaises(XfrinProtocolError,
@@ -1750,7 +1748,7 @@ class TestIXFRResponse(TestXfrinConnection):
specification, but it is how BIND 9 works and we do the same.
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
self._create_a('192.0.2.1')])
self.assertRaises(XfrinProtocolError,
@@ -1767,7 +1765,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[begin_soa_rrset, soa_rrset])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1784,7 +1782,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
@@ -1806,7 +1804,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
@@ -1825,7 +1823,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1852,10 +1850,10 @@ class TestIXFRSession(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn.response_generator = create_ixfr_response
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
# Check some details of the IXFR protocol processing
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1869,7 +1867,7 @@ class TestIXFRSession(TestXfrinConnection):
qmsg.from_wire(qdata, len(qdata))
self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
- self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+ self.assertEqual(RRType.IXFR, qmsg.get_question()[0].get_type())
self.assertEqual(1, self.conn._transfer_stats.message_count)
self.assertEqual(0, self.conn._transfer_stats.axfr_rr_count)
@@ -1886,18 +1884,18 @@ class TestIXFRSession(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.conn.response_generator = create_ixfr_response
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
def test_do_xfrin_fail2(self):
'''IXFR fails due to a bogus DNS message.
'''
self._create_broken_response_data()
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
def test_do_xfrin_uptodate(self):
'''IXFR is (gracefully) aborted because serial is not new
@@ -1906,10 +1904,10 @@ class TestIXFRSession(TestXfrinConnection):
def create_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[begin_soa_rrset])
self.conn.response_generator = create_response
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1, self.conn._transfer_stats.message_count)
self.assertEqual(0, self.conn._transfer_stats.axfr_rr_count)
@@ -1956,7 +1954,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def get_zone_serial(self):
result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
self.assertEqual(DataSourceClient.SUCCESS, result)
- result, soa, _ = finder.find(TEST_ZONE_NAME, RRType.SOA())
+ result, soa, _ = finder.find(TEST_ZONE_NAME, RRType.SOA)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual(1, soa.get_rdata_count())
return get_soa_serial(soa.get_rdata()[0])
@@ -1971,13 +1969,13 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn.response_generator = create_ixfr_response
# Confirm xfrin succeeds and SOA is updated
self.assertEqual(1230, self.get_zone_serial().get_value())
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1234, self.get_zone_serial().get_value())
# Also confirm the corresponding diffs are stored in the diffs table
@@ -2002,18 +2000,18 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.conn.response_generator = create_ixfr_response
self.assertEqual(1230, self.get_zone_serial().get_value())
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1230, self.get_zone_serial().get_value())
def test_do_ixfrin_nozone_sqlite3(self):
self._set_test_zone(Name('nosuchzone.example'))
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
# This should fail even before starting state transition
self.assertEqual(None, self.conn.get_xfrstate())
@@ -2030,23 +2028,23 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
# Confirm xfrin succeeds and SOA is updated, A RR is deleted.
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
def test_do_ixfrin_axfr_sqlite3(self):
'''AXFR-style IXFR.
'''
- self.axfr_check(RRType.IXFR())
+ self.axfr_check(RRType.IXFR)
def test_do_axfrin_sqlite3(self):
'''AXFR.
'''
- self.axfr_check(RRType.AXFR())
+ self.axfr_check(RRType.AXFR)
def axfr_failure_check(self, type):
'''Similar to the previous two tests, but xfrin fails due to error.
@@ -2062,23 +2060,23 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
def test_do_xfrin_axfr_sqlite3_fail(self):
'''Failure case for AXFR-style IXFR.
'''
- self.axfr_failure_check(RRType.IXFR())
+ self.axfr_failure_check(RRType.IXFR)
def test_do_axfrin_sqlite3_fail(self):
'''Failure case for AXFR.
'''
- self.axfr_failure_check(RRType.AXFR())
+ self.axfr_failure_check(RRType.AXFR)
def test_do_axfrin_nozone_sqlite3(self):
'''AXFR test with an empty SQLite3 DB file, thus no target zone there.
@@ -2095,16 +2093,16 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ RRType.AXFR)],
answers=[soa_rrset, self._create_ns(), soa_rrset])
self.conn.response_generator = create_response
self._set_test_zone(Name('example.com'))
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR))
self.assertEqual(type(XfrinAXFREnd()),
type(self.conn.get_xfrstate()))
self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
class TestXfrinRecorder(unittest.TestCase):
def setUp(self):
@@ -2213,7 +2211,7 @@ class TestXfrinProcess(unittest.TestCase):
# Normal, successful case. We only check that things are cleaned up
# at the tearDown time.
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_connect(self):
@@ -2221,7 +2219,7 @@ class TestXfrinProcess(unittest.TestCase):
# cleaned up.
self.do_raise_on_connect = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_close(self):
@@ -2231,7 +2229,7 @@ class TestXfrinProcess(unittest.TestCase):
self.do_raise_on_connect = True
self.do_raise_on_close = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_publish(self):
@@ -2239,7 +2237,7 @@ class TestXfrinProcess(unittest.TestCase):
# everything must still be cleaned up.
self.do_raise_on_publish = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
class TestXfrin(unittest.TestCase):
@@ -2292,7 +2290,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_chclass(self):
self.args['zone_class'] = 'CH'
- self.assertEqual(self._do_parse_zone_name_class()[1], RRClass.CH())
+ self.assertEqual(self._do_parse_zone_name_class()[1], RRClass.CH)
def test_parse_cmd_params_bogusclass(self):
self.args['zone_class'] = 'XXX'
@@ -2339,7 +2337,7 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
# By default we use AXFR (for now)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_short_command1(self):
# try it when only specifying the zone name (of unknown zone)
@@ -2453,7 +2451,7 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(int(TEST_MASTER_PORT),
self.xfr.xfrin_started_master_port)
# By default we use AXFR (for now)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
@@ -2520,7 +2518,7 @@ class TestXfrin(unittest.TestCase):
self.xfr._max_transfers_in)
for zone_config in config_given['zones']:
zone_name = zone_config['name']
- zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN())
+ zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN)
self.assertEqual(str(zone_info.master_addr), zone_config['master_addr'])
self.assertEqual(zone_info.master_port, zone_config['master_port'])
if 'tsig_key' in zone_config:
@@ -2695,16 +2693,16 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_retransfer_ixfr_enabled(self):
# If IXFR is explicitly enabled in config, IXFR will be used
self.common_ixfr_setup('retransfer', True)
- self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.IXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_ixfr_enabled(self):
# Same for refresh
self.common_ixfr_setup('refresh', True)
- self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.IXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_with_tsig(self):
self.common_ixfr_setup('retransfer', False, 'example.com.key')
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_with_tsig_bad_key(self):
# bad keys should not reach xfrin, but should they somehow,
@@ -2718,7 +2716,7 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_refresh_with_tsig(self):
self.common_ixfr_setup('refresh', False, 'example.com.key')
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_with_tsig_bad_key(self):
# bad keys should not reach xfrin, but should they somehow,
@@ -2734,12 +2732,12 @@ class TestXfrin(unittest.TestCase):
# Similar to the previous case, but explicitly disabled. AXFR should
# be used.
self.common_ixfr_setup('retransfer', False)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_ixfr_disabled(self):
# Same for refresh
self.common_ixfr_setup('refresh', False)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
class TestXfrinMemoryZones(unittest.TestCase):
def setUp(self):
@@ -3015,7 +3013,7 @@ class TestXfrinProcess(unittest.TestCase):
self.__rets = rets
published = rets[-1]
xfrin.process_xfrin(self, XfrinRecorder(), Name("example.org."),
- RRClass.IN(), None, None, None, True, None,
+ RRClass.IN, None, None, None, True, None,
request_type, self.__get_connection)
self.assertEqual([], self.__rets)
self.assertEqual(transfers, self.__transfers)
@@ -3027,7 +3025,7 @@ class TestXfrinProcess(unittest.TestCase):
"""
Everything OK the first time, over IXFR.
"""
- self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+ self.__do_test([XFRIN_OK], [RRType.IXFR], RRType.IXFR)
# Check there was loadzone command
self.assertTrue(self._send_cc_session.send_called)
self.assertTrue(self._send_cc_session.send_called_correctly)
@@ -3038,7 +3036,7 @@ class TestXfrinProcess(unittest.TestCase):
"""
Everything OK the first time, over AXFR.
"""
- self.__do_test([XFRIN_OK], [RRType.AXFR()], RRType.AXFR())
+ self.__do_test([XFRIN_OK], [RRType.AXFR], RRType.AXFR)
def test_axfr_fail(self):
"""
@@ -3046,15 +3044,15 @@ class TestXfrinProcess(unittest.TestCase):
to fail on AXFR, but succeed on IXFR and we didn't use IXFR in the first
place for some reason.
"""
- self.__do_test([XFRIN_FAIL], [RRType.AXFR()], RRType.AXFR())
+ self.__do_test([XFRIN_FAIL], [RRType.AXFR], RRType.AXFR)
def test_ixfr_fallback(self):
"""
The transfer fails over IXFR, but suceeds over AXFR. It should fall back
to it and say everything is OK.
"""
- self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR(), RRType.AXFR()],
- RRType.IXFR())
+ self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR, RRType.AXFR],
+ RRType.IXFR)
def test_ixfr_fail(self):
"""
@@ -3062,13 +3060,13 @@ class TestXfrinProcess(unittest.TestCase):
(only once) and should try both before giving up.
"""
self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
- [RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+ [RRType.IXFR, RRType.AXFR], RRType.IXFR)
def test_send_loadzone(self):
"""
Check the loadzone command is sent after successful transfer.
"""
- self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+ self.__do_test([XFRIN_OK], [RRType.IXFR], RRType.IXFR)
self.assertTrue(self._send_cc_session.send_called)
self.assertTrue(self._send_cc_session.send_called_correctly)
self.assertTrue(self._send_cc_session.recv_called)
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index cfefa63..55d9818 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -78,7 +78,7 @@ DBG_XFRIN_TRACE = logger.DBGLVL_TRACE_BASIC
# (TODO: have similar support to get default values for command
# arguments as we do for config options)
DEFAULT_MASTER_PORT = 53
-DEFAULT_ZONE_CLASS = RRClass.IN()
+DEFAULT_ZONE_CLASS = RRClass.IN
__version__ = 'BIND10'
@@ -135,7 +135,7 @@ def _check_zone_class(zone_class_str):
"""If the given argument is a string: checks if the given class is
a valid one, and returns an RRClass object if so.
Raises XfrinZoneInfoException if not.
- If it is None, this function returns the default RRClass.IN()"""
+ If it is None, this function returns the default RRClass.IN"""
if zone_class_str is None:
return DEFAULT_ZONE_CLASS
try:
@@ -316,12 +316,12 @@ class XfrinState:
class XfrinInitialSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+ rr.get_type().to_text() + ' received)')
conn._end_serial = get_soa_serial(rr.get_rdata()[0])
- if conn._request_type == RRType.IXFR() and \
+ if conn._request_type == RRType.IXFR and \
conn._end_serial <= conn._request_serial:
logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
conn._request_serial, conn._end_serial)
@@ -364,8 +364,8 @@ class XfrinFirstData(XfrinState):
http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
'''
- if conn._request_type == RRType.IXFR() and \
- rr.get_type() == RRType.SOA() and \
+ if conn._request_type == RRType.IXFR and \
+ rr.get_type() == RRType.SOA and \
conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
conn.zone_str())
@@ -382,7 +382,7 @@ class XfrinFirstData(XfrinState):
class XfrinIXFRDeleteSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
# this shouldn't happen; should this occur it means an internal
# bug.
raise XfrinException(rr.get_type().to_text() +
@@ -402,7 +402,7 @@ class XfrinIXFRDeleteSOA(XfrinState):
class XfrinIXFRDelete(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# This is the only place where current_serial is set
conn._current_serial = get_soa_serial(rr.get_rdata()[0])
self.set_xfrstate(conn, XfrinIXFRAddSOA())
@@ -413,7 +413,7 @@ class XfrinIXFRDelete(XfrinState):
class XfrinIXFRAddSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
# this shouldn't happen; should this occur it means an internal
# bug.
raise XfrinException(rr.get_type().to_text() +
@@ -425,7 +425,7 @@ class XfrinIXFRAddSOA(XfrinState):
class XfrinIXFRAdd(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# This SOA marks the end of a difference sequence
conn.get_transfer_stats().ixfr_changeset_count += 1
soa_serial = get_soa_serial(rr.get_rdata()[0])
@@ -480,7 +480,7 @@ class XfrinAXFR(XfrinState):
Handle the RR by putting it into the zone.
"""
conn._diff.add_data(rr)
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# SOA means end. Don't commit it yet - we need to perform
# post-transfer checks
@@ -662,7 +662,7 @@ class XfrinConnection(asyncore.dispatcher):
result, finder = self._datasrc_client.find_zone(self._zone_name)
if result != DataSourceClient.SUCCESS:
return None
- result, soa_rrset, _ = finder.find(self._zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(self._zone_name, RRType.SOA)
if result != ZoneFinder.SUCCESS:
logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
return None
@@ -714,8 +714,8 @@ class XfrinConnection(asyncore.dispatcher):
query_id = random.randint(0, 0xFFFF)
self._query_id = query_id
msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.QUERY)
+ msg.set_rcode(Rcode.NOERROR)
msg.add_question(Question(self._zone_name, self._rrclass, query_type))
# Remember our serial, if known
@@ -723,7 +723,7 @@ class XfrinConnection(asyncore.dispatcher):
if self._zone_soa is not None else None
# Set the authority section with our SOA for IXFR
- if query_type == RRType.IXFR():
+ if query_type == RRType.IXFR:
if self._zone_soa is None:
# (incremental) IXFR doesn't work without known SOA
raise XfrinException('Failed to create IXFR query due to no ' +
@@ -855,7 +855,7 @@ class XfrinConnection(asyncore.dispatcher):
resp_question = msg.get_question()[0]
if resp_question.get_name() != self._zone_name or \
resp_question.get_class() != self._rrclass or \
- resp_question.get_type() != RRType.SOA():
+ resp_question.get_type() != RRType.SOA:
raise XfrinProtocolError('Invalid response to SOA query: '
'question mismatch: ' +
str(resp_question))
@@ -863,21 +863,21 @@ class XfrinConnection(asyncore.dispatcher):
# Look into the answer section for SOA
soa = None
for rr in msg.get_section(Message.SECTION_ANSWER):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
if soa is not None:
raise XfrinProtocolError('SOA response had multiple SOAs')
soa = rr
# There should not be a CNAME record at top of zone.
- if rr.get_type() == RRType.CNAME():
+ if rr.get_type() == RRType.CNAME:
raise XfrinProtocolError('SOA query resulted in CNAME')
# If SOA is not found, try to figure out the reason then report it.
if soa is None:
# See if we have any SOA records in the authority section.
for rr in msg.get_section(Message.SECTION_AUTHORITY):
- if rr.get_type() == RRType.NS():
+ if rr.get_type() == RRType.NS:
raise XfrinProtocolError('SOA query resulted in referral')
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
raise XfrinProtocolError('SOA query resulted in NODATA')
raise XfrinProtocolError('No SOA record found in response to ' +
'SOA query')
@@ -901,7 +901,7 @@ class XfrinConnection(asyncore.dispatcher):
'''
- self._send_query(RRType.SOA())
+ self._send_query(RRType.SOA)
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
soa_response = self._get_request_response(msg_len)
@@ -925,7 +925,7 @@ class XfrinConnection(asyncore.dispatcher):
return XFRIN_OK
- def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+ def do_xfrin(self, check_soa, request_type=RRType.AXFR):
'''Do an xfr session by sending xfr request and parsing responses.'''
try:
@@ -933,7 +933,7 @@ class XfrinConnection(asyncore.dispatcher):
self._request_type = request_type
# Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
# to hardcode here.
- req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+ req_str = 'IXFR' if request_type == RRType.IXFR else 'AXFR'
if check_soa:
self._check_soa_serial()
self.close()
@@ -1024,7 +1024,7 @@ class XfrinConnection(asyncore.dispatcher):
# cause interoperability trouble with stricter checks.
msg_rcode = msg.get_rcode()
- if msg_rcode != Rcode.NOERROR():
+ if msg_rcode != Rcode.NOERROR:
raise XfrinProtocolError('error response: %s' %
msg_rcode.to_text())
@@ -1120,13 +1120,13 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
ret = XFRIN_FAIL
if conn.connect_to_master():
ret = conn.do_xfrin(check_soa, request_type)
- if ret == XFRIN_FAIL and request_type == RRType.IXFR():
+ if ret == XFRIN_FAIL and request_type == RRType.IXFR:
# IXFR failed for some reason. It might mean the server can't
# handle it, or we don't have the zone or we are out of sync or
# whatever else. So we retry with with AXFR, as it may succeed
# in many such cases.
retry = True
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
logger.warn(XFRIN_XFR_TRANSFER_FALLBACK, conn.zone_str())
conn.close()
conn = None
@@ -1172,7 +1172,7 @@ def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
xfrin_recorder.decrement(zone_name)
if exception is not None:
- typestr = "AXFR" if request_type == RRType.AXFR() else "IXFR"
+ typestr = "AXFR" if request_type == RRType.AXFR else "IXFR"
logger.error(XFRIN_XFR_PROCESS_FAILURE, typestr, zone_name.to_text(),
str(rrclass), str(exception))
@@ -1507,9 +1507,9 @@ class Xfrin:
logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
answer = create_answer(1, errmsg)
else:
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
if zone_info.use_ixfr:
- request_type = RRType.IXFR()
+ request_type = RRType.IXFR
master_addr = zone_info.get_master_addr_info()
if notify_addr[0] == master_addr[0] and\
notify_addr[2] == master_addr[2]:
@@ -1538,11 +1538,11 @@ class Xfrin:
rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
tsig_key = None
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
if zone_info:
tsig_key = zone_info.get_tsig_key()
if zone_info.use_ixfr:
- request_type = RRType.IXFR()
+ request_type = RRType.IXFR
db_file = args.get('db_file') or self._get_db_file()
ret = self.xfrin_start(zone_name,
rrclass,
@@ -1738,5 +1738,7 @@ def main(xfrin_class, use_signal=True):
if xfrind:
xfrind.shutdown()
+ logger.info(XFRIN_EXITING)
+
if __name__ == '__main__':
main(Xfrin)
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 88cacde..1d90b75 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -60,6 +60,9 @@ error is given in the log message.
There was an error opening a connection to the master. The error is
shown in the log message.
+% XFRIN_EXITING exiting
+The xfrin daemon is exiting.
+
% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
In an attempt of IXFR processing, the beginning SOA of the first difference
(following the initial SOA that specified the final SOA for all the
@@ -122,10 +125,6 @@ There was a problem sending a message to the xfrout module or the
zone manager. This most likely means that the msgq daemon has quit or
was killed.
-% XFRIN_MSGQ_SEND_ERROR_AUTH error while contacting %1
-There was a problem sending a message to b10-auth. This most likely
-means that the msgq daemon has quit or was killed.
-
% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
There was a problem sending a message to the zone manager. This most
likely means that the msgq daemon has quit or was killed.
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 8b616d2..0790f98 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -55,8 +55,8 @@
outgoing DNS zone transfer service using AXFR or IXFR.
It is also used to send outgoing NOTIFY messages.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
When the <command>b10-auth</command> DNS server receives
a transfer request, <command>b10-xfrout</command> sends the
zone as found in the BIND 10 zone data store.
@@ -147,7 +147,7 @@
and exits <command>b10-xfrout</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the BIND 10 b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 774187f..bc0fae9 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -38,7 +38,7 @@ TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
#
TEST_ZONE_NAME_STR = "example.com."
TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
IXFR_OK_VERSION = 2011111802
IXFR_NG_VERSION = 2011111803
SOA_CURRENT_VERSION = 2011112001
@@ -109,16 +109,16 @@ class MockDataSrcClient:
zone names.
'''
- if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+ if name == Name('nosoa.example.com') and rrtype == RRType.SOA:
return (ZoneFinder.NXDOMAIN, None, 0)
- elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+ elif name == Name('multisoa.example.com') and rrtype == RRType.SOA:
soa_rrset = create_soa(SOA_CURRENT_VERSION)
soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
return (ZoneFinder.SUCCESS, soa_rrset, 0)
elif name == Name('maxserial.example.com'):
soa_rrset = create_soa(0xffffffff)
return (ZoneFinder.SUCCESS, soa_rrset, 0)
- elif rrtype == RRType.SOA():
+ elif rrtype == RRType.SOA:
return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION), 0)
raise ValueError('Unexpected input to mock finder: bug in test case?')
@@ -238,17 +238,17 @@ class TestXfroutSessionBase(unittest.TestCase):
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
- req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
+ msg.set_opcode(Opcode.QUERY)
+ msg.set_rcode(Rcode.NOERROR)
+ req_type = RRType.AXFR if ixfr is None else RRType.IXFR
if with_question:
- msg.add_question(Question(zone_name, RRClass.IN(),
+ msg.add_question(Question(zone_name, RRClass.IN,
req_type if qtype is None else qtype))
- if req_type == RRType.IXFR():
- soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+ if req_type == RRType.IXFR:
+ soa = RRset(zone_name, soa_class, RRType.SOA, RRTTL(0))
# In the RDATA only the serial matters.
for i in range(0, num_soa):
- soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+ soa.add_rdata(Rdata(RRType.SOA, soa_class,
'm. r. ' + str(ixfr) + ' 1 1 1 1'))
msg.add_rrset(Message.SECTION_AUTHORITY, soa)
@@ -263,7 +263,7 @@ class TestXfroutSessionBase(unittest.TestCase):
def set_request_type(self, type):
self.xfrsess._request_type = type
- if type == RRType.AXFR():
+ if type == RRType.AXFR:
self.xfrsess._request_typestr = 'AXFR'
else:
self.xfrsess._request_typestr = 'IXFR'
@@ -280,7 +280,7 @@ class TestXfroutSessionBase(unittest.TestCase):
[{"action": "ACCEPT"}]),
{},
**self._counters)
- self.set_request_type(RRType.AXFR()) # test AXFR by default
+ self.set_request_type(RRType.AXFR) # test AXFR by default
self.mdata = self.create_request_data()
self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
# some test replaces a module-wide function. We should ensure the
@@ -342,7 +342,7 @@ class TestXfroutSession(TestXfroutSessionBase):
self.xfrsess._request_data = self.mdata
self.xfrsess._server.increase_transfers_counter = lambda : False
XfroutSession._handle(self.xfrsess)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED)
def test_quota_ok(self):
'''The default case in terms of the xfrout quota.
@@ -355,7 +355,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# Replace the data source client to avoid datasrc related exceptions
self.xfrsess.ClientClass = MockDataSrcClient
XfroutSession._handle(self.xfrsess)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR)
def test_exception_from_session(self):
'''Test the case where the main processing raises an exception.
@@ -372,14 +372,14 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_parse_query_message(self):
# Valid AXFR
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
- self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
+ self.assertEqual(RRType.AXFR, self.xfrsess._request_type)
self.assertEqual(get_rcode.to_text(), "NOERROR")
# Valid IXFR
request_data = self.create_request_data(ixfr=2011111801)
rcode, msg = self.xfrsess._parse_query_message(request_data)
- self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
- self.assertEqual(Rcode.NOERROR(), rcode)
+ self.assertEqual(RRType.IXFR, self.xfrsess._request_type)
+ self.assertEqual(Rcode.NOERROR, rcode)
# Broken request: no question
self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
@@ -387,7 +387,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# Broken request: invalid RR type (neither AXFR nor IXFR)
self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
- self.create_request_data(qtype=RRType.A()))
+ self.create_request_data(qtype=RRType.A))
# NOERROR
request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
@@ -554,7 +554,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# should be used.
self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
{"from": "127.0.0.1", "action": "ACCEPT"}])
- acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN)
self.assertEqual(acl, self.xfrsess._acl)
# install a per zone config with transfer ACL for example.com. Then
@@ -567,15 +567,15 @@ class TestXfroutSession(TestXfroutSessionBase):
com_acl
self.assertEqual(com_acl,
self.xfrsess._get_transfer_acl(Name('example.com'),
- RRClass.IN()))
+ RRClass.IN))
self.assertEqual(self.xfrsess._acl,
self.xfrsess._get_transfer_acl(Name('example.org'),
- RRClass.IN()))
+ RRClass.IN))
# Name matching should be case insensitive.
self.assertEqual(com_acl,
self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
- RRClass.IN()))
+ RRClass.IN))
def test_send_data(self):
self.xfrsess._send_data(self.sock, self.mdata)
@@ -600,9 +600,9 @@ class TestXfroutSession(TestXfroutSessionBase):
msg = self.getmsg()
msg.make_response()
# SOA record data with different cases
- soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+ soa_rrset = RRset(Name('Example.com.'), RRClass.IN, RRType.SOA,
RRTTL(3600))
- soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ soa_rrset.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'master.Example.com. admin.exAmple.com. ' +
'2011112001 3600 1800 2419200 7200'))
msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
@@ -680,8 +680,8 @@ class TestXfroutSession(TestXfroutSessionBase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
def test_trigger_send_message_with_last_soa(self):
- rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
- rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
+ rrset_a = RRset(Name("example.com"), RRClass.IN, RRType.A, RRTTL(3600))
+ rrset_a.add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
msg = self.getmsg()
msg.make_response()
@@ -759,36 +759,36 @@ class TestXfroutSession(TestXfroutSessionBase):
self.xfrsess.ClientClass = MockDataSrcClient
# Successful case. A zone iterator should be set up.
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
# Failure cases
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
- Rcode.NOTAUTH())
+ Rcode.NOTAUTH)
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
- Rcode.SERVFAIL())
+ Rcode.SERVFAIL)
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
- Rcode.SERVFAIL())
+ Rcode.SERVFAIL)
def test_xfrout_ixfr_setup(self):
self.xfrsess.ClientClass = MockDataSrcClient
- self.set_request_type(RRType.IXFR())
+ self.set_request_type(RRType.IXFR)
# Successful case of pure IXFR. A zone journal reader should be set
# up.
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._jnl_reader)
# Successful case, but as a result of falling back to AXFR-style
# IXFR. A zone iterator should be set up instead of a journal reader.
self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -797,7 +797,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# indicating that the response will contain just one SOA.
self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION+1)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -805,7 +805,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# the local SOA.
self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -814,7 +814,7 @@ class TestXfroutSession(TestXfroutSessionBase):
zone_name = Name('maxserial.example.com') # whose SOA is 0xffffffff
self.mdata = self.create_request_data(ixfr=1, zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -823,7 +823,7 @@ class TestXfroutSession(TestXfroutSessionBase):
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
# Failure cases
@@ -831,42 +831,42 @@ class TestXfroutSession(TestXfroutSessionBase):
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH)
# this is a strange case: zone's SOA will be found but the journal
# reader won't be created due to 'no such zone'.
zone_name = Name('notauth2.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH)
zone_name = Name('nosoa.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL)
zone_name = Name('multisoa.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL)
# query name doesn't match the SOA's owner
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
# query's RR class doesn't match the SOA's class
zone_name = TEST_ZONE_NAME # make sure the name matches this time
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
- soa_class=RRClass.CH())
+ soa_class=RRClass.CH)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
# multiple SOA RRs
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
num_soa=2)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
def test_dns_xfrout_start_formerror(self):
# formerror
@@ -876,7 +876,7 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_dns_xfrout_start_notauth(self):
def notauth(msg, name, rrclass):
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
self.xfrsess._xfrout_setup = notauth
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
get_msg = self.sock.read_msg()
@@ -887,11 +887,11 @@ class TestXfroutSession(TestXfroutSessionBase):
raise isc.datasrc.Error('exception for the sake of test')
self.xfrsess.ClientClass = internal_raise
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL)
def test_dns_xfrout_start_noerror(self):
def noerror(msg, name, rrclass):
- return Rcode.NOERROR()
+ return Rcode.NOERROR
self.xfrsess._xfrout_setup = noerror
def myreply(msg, sock):
@@ -905,7 +905,7 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_dns_xfrout_start_with_notcallable_xfrreqdone(self):
def noerror(msg, name, rrclass):
- return Rcode.NOERROR()
+ return Rcode.NOERROR
self.xfrsess._xfrout_setup = noerror
def myreply(msg, sock):
@@ -925,9 +925,9 @@ class TestXfroutSession(TestXfroutSessionBase):
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
def test_reply_xfrout_query_axfr_with_tsig(self):
- rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+ rrset = RRset(Name('a.example.com'), RRClass.IN, RRType.A,
RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
+ rrset.add_rdata(Rdata(RRType.A, RRClass.IN, '192.0.2.1'))
global xfrout
def get_rrset_len(rrset):
@@ -1015,8 +1015,8 @@ class TestXfroutSession(TestXfroutSessionBase):
algorithm = hmac-md5)
'''
- soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+ soa = RRset(Name('.'), RRClass.IN, RRType.SOA, RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN, '. . 0 0 0 0 0'))
self.mdata = self.create_request_data(zone_name=Name('.'))
self.xfrsess._soa = soa
if tsig:
@@ -1144,7 +1144,7 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.xfrsess._request_data = self.mdata
self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
'test.sqlite3'
- self.ns_name = 'a.dns.example.com'
+ self.ns_name = 'a.dns.example.com.'
def check_axfr_stream(self, response):
'''Common checks for AXFR(-style) response for the test zone.
@@ -1177,10 +1177,10 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.assertEqual(self.get_counter('ixfr_ended'), 0)
XfroutSession._handle(self.xfrsess)
response = self.sock.read_msg(Message.PRESERVE_ORDER);
- self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.assertEqual(Rcode.NOERROR, response.get_rcode())
self.check_axfr_stream(response)
- self.assertEqual(self.xfrsess._request_type, RRType.AXFR())
- self.assertNotEqual(self.xfrsess._request_type, RRType.IXFR())
+ self.assertEqual(self.xfrsess._request_type, RRType.AXFR)
+ self.assertNotEqual(self.xfrsess._request_type, RRType.IXFR)
self.assertEqual(self.get_counter('axfr_started'), 1)
self.assertEqual(self.get_counter('axfr_ended'), 1)
self.assertEqual(self.get_counter('ixfr_started'), 0)
@@ -1191,10 +1191,10 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.create_request_data(ixfr=IXFR_NG_VERSION)
XfroutSession._handle(self.xfrsess)
response = self.sock.read_msg(Message.PRESERVE_ORDER);
- self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.assertEqual(Rcode.NOERROR, response.get_rcode())
# This is an AXFR-style IXFR. So the question section should indicate
# that it's an IXFR response.
- self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+ self.assertEqual(RRType.IXFR, response.get_question()[0].get_type())
self.check_axfr_stream(response)
def test_ixfr_normal_session(self):
@@ -1222,8 +1222,8 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.assertEqual(len(expected_records), len(actual_records))
for (expected_rr, actual_rr) in zip(expected_records, actual_records):
self.assertTrue(rrsets_equal(expected_rr, actual_rr))
- self.assertNotEqual(self.xfrsess._request_type, RRType.AXFR())
- self.assertEqual(self.xfrsess._request_type, RRType.IXFR())
+ self.assertNotEqual(self.xfrsess._request_type, RRType.AXFR)
+ self.assertEqual(self.xfrsess._request_type, RRType.IXFR)
self.assertEqual(self.get_counter('axfr_started'), 0)
self.assertEqual(self.get_counter('axfr_ended'), 0)
self.assertEqual(self.get_counter('ixfr_started'), 1)
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index f869955..5d25276 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -227,9 +227,9 @@ class XfroutSession():
self._tsig_key_ring)
tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
if tsig_error != TSIGError.NOERROR:
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def _parse_query_message(self, mdata):
''' parse query message to [socket,message]'''
@@ -239,11 +239,11 @@ class XfroutSession():
Message.from_wire(msg, mdata)
except Exception as err: # Exception is too broad
logger.error(XFROUT_PARSE_QUERY_ERROR, err)
- return Rcode.FORMERR(), None
+ return Rcode.FORMERR, None
# TSIG related checks
rcode = self._check_request_tsig(msg, mdata)
- if rcode != Rcode.NOERROR():
+ if rcode != Rcode.NOERROR:
return rcode, msg
# Make sure the question is valid. This should be ensured by
@@ -257,9 +257,9 @@ class XfroutSession():
# Identify the request type
self._request_type = question.get_type()
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._request_typestr = 'AXFR'
- elif self._request_type == RRType.IXFR():
+ elif self._request_type == RRType.IXFR:
self._request_typestr = 'IXFR'
else:
# Likewise, this should be impossible.
@@ -283,7 +283,7 @@ class XfroutSession():
logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
self._request_type, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class))
- return Rcode.REFUSED(), msg
+ return Rcode.REFUSED, msg
return rcode, msg
@@ -351,16 +351,16 @@ class XfroutSession():
'''
result, finder = self._datasrc_client.find_zone(zone_name)
if result != DataSourceClient.SUCCESS:
- return (Rcode.NOTAUTH(), None)
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ return (Rcode.NOTAUTH, None)
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result != ZoneFinder.SUCCESS:
- return (Rcode.SERVFAIL(), None)
+ return (Rcode.SERVFAIL, None)
# Especially for database-based zones, a working zone may be in
# a broken state where it has more than one SOA RR. We proactively
# check the condition and abort the xfr attempt if we identify it.
if soa_rrset.get_rdata_count() != 1:
- return (Rcode.SERVFAIL(), None)
- return (Rcode.NOERROR(), soa_rrset)
+ return (Rcode.SERVFAIL, None)
+ return (Rcode.NOERROR, soa_rrset)
def __axfr_setup(self, zone_name):
'''Setup a zone iterator for AXFR or AXFR-style IXFR.
@@ -379,16 +379,16 @@ class XfroutSession():
# update get_iterator() API so that we can distinguish "no such
# zone" and other cases (#1373). For now we consider all these
# cases as NOTAUTH.
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
# If we are an authoritative name server for the zone, but fail
# to find the zone's SOA record in datasource, xfrout can't
# provide zone transfer for it.
self._soa = self._iterator.get_soa()
if self._soa is None or self._soa.get_rdata_count() != 1:
- return Rcode.SERVFAIL()
+ return Rcode.SERVFAIL
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def __ixfr_setup(self, request_msg, zone_name, zone_class):
'''Setup a zone journal reader for IXFR.
@@ -405,21 +405,21 @@ class XfroutSession():
# Ignore data whose owner name is not the zone apex, and
# ignore non-SOA or different class of records.
if auth_rrset.get_name() != zone_name or \
- auth_rrset.get_type() != RRType.SOA() or \
+ auth_rrset.get_type() != RRType.SOA or \
auth_rrset.get_class() != zone_class:
continue
if auth_rrset.get_rdata_count() != 1:
logger.info(XFROUT_IXFR_MULTIPLE_SOA,
format_addrinfo(self._remote))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
remote_soa = auth_rrset
if remote_soa is None:
logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
# Retrieve the local SOA
rcode, self._soa = self._get_zone_soa(zone_name)
- if rcode != Rcode.NOERROR():
+ if rcode != Rcode.NOERROR:
return rcode
# RFC1995 says "If an IXFR query with the same or newer version
@@ -437,7 +437,7 @@ class XfroutSession():
logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class),
begin_serial, end_serial)
- return Rcode.NOERROR()
+ return Rcode.NOERROR
# Set up the journal reader or fall back to AXFR-style IXFR
try:
@@ -462,12 +462,12 @@ class XfroutSession():
# between these two operations. We treat it as NOTAUTH.
logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class))
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
# Use the reader as the iterator to generate the response.
self._iterator = self._jnl_reader
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def _xfrout_setup(self, request_msg, zone_name, zone_class):
'''Setup a context for xfr responses according to the request type.
@@ -490,7 +490,7 @@ class XfroutSession():
self._server.get_db_file() + '"}'
self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
return self.__axfr_setup(zone_name)
else:
return self.__ixfr_setup(request_msg, zone_name, zone_class)
@@ -500,17 +500,17 @@ class XfroutSession():
#TODO. create query message and parse header
if rcode_ is None: # Dropped by ACL
return
- elif rcode_ == Rcode.NOTAUTH() or rcode_ == Rcode.REFUSED():
+ elif rcode_ == Rcode.NOTAUTH or rcode_ == Rcode.REFUSED:
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
- elif rcode_ != Rcode.NOERROR():
+ elif rcode_ != Rcode.NOERROR:
return self._reply_query_with_error_rcode(msg, sock_fd,
- Rcode.FORMERR())
+ Rcode.FORMERR)
elif not quota_ok:
logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
format_addrinfo(self._remote),
self._server._max_transfers_out)
return self._reply_query_with_error_rcode(msg, sock_fd,
- Rcode.REFUSED())
+ Rcode.REFUSED)
question = msg.get_question()[0]
zone_name = question.get_name()
@@ -522,15 +522,15 @@ class XfroutSession():
except Exception as ex:
logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
format_addrinfo(self._remote), zone_str, ex)
- rcode_ = Rcode.SERVFAIL()
- if rcode_ != Rcode.NOERROR():
+ rcode_ = Rcode.SERVFAIL
+ if rcode_ != Rcode.NOERROR:
logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
format_addrinfo(self._remote), zone_str, rcode_)
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
# increment Xfr starts by RRType
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._inc_axfr_running()
else:
self._inc_ixfr_running()
@@ -542,7 +542,7 @@ class XfroutSession():
format_addrinfo(self._remote), zone_str, err)
finally:
# decrement Xfr starts by RRType
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._dec_axfr_running()
else:
self._dec_ixfr_running()
@@ -614,7 +614,7 @@ class XfroutSession():
# For AXFR (or AXFR-style IXFR), in which case _jnl_reader is None,
# we should skip SOAs from the iterator.
- if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
+ if self._jnl_reader is None and rrset.get_type() == RRType.SOA:
continue
# We calculate the maximum size of the RRset (i.e. the
@@ -756,7 +756,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
"""
sock_fd = recv_fd(request.fileno())
if sock_fd < 0:
- logger.warn(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
+ logger.warn(XFROUT_RECEIVE_FD_FAILED)
return False
# receive request msg. If it fails we simply terminate the thread;
@@ -1215,7 +1215,7 @@ class XfroutServer:
zone_name = args.get('zone_name')
zone_class = args.get('zone_class')
if not zone_class:
- zone_class = str(RRClass.IN())
+ zone_class = str(RRClass.IN)
if zone_name:
logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
if self.send_notify(zone_name, zone_class):
@@ -1274,7 +1274,7 @@ if '__main__' == __name__:
xfrout_server = XfroutServer()
xfrout_server.run()
except KeyboardInterrupt:
- logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
+ logger.info(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(XFROUT_CC_SESSION_ERROR, str(e))
except ModuleCCSessionError as e:
@@ -1287,3 +1287,4 @@ if '__main__' == __name__:
if xfrout_server:
xfrout_server.shutdown()
+ logger.info(XFROUT_EXITING)
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index d48aa24..5fb254e 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -32,6 +32,9 @@ configuration manager b10-cfgmgr is not running.
The xfrout process encountered an error when installing the configuration at
startup time. Details of the error are included in the log message.
+% XFROUT_EXITING exiting
+The xfrout daemon is exiting.
+
% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
There was a socket error while contacting the b10-auth daemon to
fetch a transfer request. The auth daemon may have shutdown.
@@ -152,7 +155,7 @@ statistics data should be sent to the stats daemon.
The xfrout daemon received a shutdown command from the command channel
and will now shut down.
-% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+% XFROUT_RECEIVE_FD_FAILED error receiving the file descriptor for an XFR connection
There was an error receiving the file descriptor for the transfer
request from b10-auth. There can be several reasons for this, but
the most likely cause is that b10-auth terminates for some reason
diff --git a/src/bin/zonemgr/b10-zonemgr.xml b/src/bin/zonemgr/b10-zonemgr.xml
index f859d23..91cdfc2 100644
--- a/src/bin/zonemgr/b10-zonemgr.xml
+++ b/src/bin/zonemgr/b10-zonemgr.xml
@@ -55,8 +55,8 @@
as the BIND 10 secondary manager, keeps track of timers
and other information necessary for BIND 10 to act as a DNS slave.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -74,7 +74,7 @@
<!--
- self._send_command(XFRIN_MODULE_NAME, ZONE_NOTIFY_COMMAND, param)
+ self._send_command(XFRIN_MODULE_NAME, ZONE_NOTIFY_COMMAND, param)
self._clear_zone_notifier_master(zone_name_class)
# Send refresh command to xfrin module
else:
@@ -188,7 +188,7 @@
<command>shutdown</command> exits <command>b10-zonemgr</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index 0412e3f..40bfa39 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -625,7 +625,7 @@ class Zonemgr:
ZONE_NOTIFY_COMMAND is issued by Auth process;
ZONE_NEW_DATA_READY_CMD and ZONE_XFRIN_FAILED are issued by
Xfrin process;
- shutdown is issued by a user or Boss process. """
+ shutdown is issued by a user or Init process. """
answer = create_answer(0)
if command == ZONE_NOTIFY_COMMAND:
""" Handle Auth notify command"""
@@ -714,4 +714,4 @@ if '__main__' == __name__:
if zonemgrd and zonemgrd.running:
zonemgrd.shutdown()
- logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
+ logger.info(ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
index 4f58271..f67b5b9 100644
--- a/src/bin/zonemgr/zonemgr_messages.mes
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -69,7 +69,7 @@ new data.
% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
This is a debug message indicating that the zone manager has received
-a SHUTDOWN command over the command channel from the Boss process.
+a SHUTDOWN command over the command channel from the Init process.
It will act on this command and shut down.
% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
@@ -114,7 +114,7 @@ connecting to the command channel daemon. The most usual cause of this
problem is that the daemon is not running.
% ZONEMGR_SHUTDOWN zone manager has shut down
-A debug message, output when the zone manager has shut down completely.
+The zone manager has shut down completely.
% ZONEMGR_STARTED zonemgr started
This informational message is output by zonemgr when all initialization
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 94b955a..b561784 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -60,6 +60,10 @@ and its length (2 bytes) is counted in the total length.
There should be data representing the length of message on the socket, but it
is not there.
+% CC_LNAME_RECEIVED received local name: %1
+Debug message: the local module received its unique identifier (name)
+from msgq on completion of establishing the session with msgq.
+
% CC_NO_MESSAGE no message ready to be received yet
The program polled for incoming messages, but there was no message waiting.
This is a debug message which may happen only after CC_GROUP_RECEIVE.
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 4455b68..1d3fac2 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -333,6 +333,7 @@ Session::establish(const char* socket_file) {
recvmsg(routing, msg, false);
impl_->lname_ = msg->get("lname")->stringValue();
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_LNAME_RECEIVED).arg(impl_->lname_);
// At this point there's no risk of resource leak.
session_holder.clear();
diff --git a/src/lib/datasrc/memory/zone_finder.h b/src/lib/datasrc/memory/zone_finder.h
index cd2bd58..f4f411a 100644
--- a/src/lib/datasrc/memory/zone_finder.h
+++ b/src/lib/datasrc/memory/zone_finder.h
@@ -66,7 +66,7 @@ public:
/// This specialized version exploits internal data structure to find
/// RRsets at the zone origin and (if \c use_minttl is true) extract
/// the SOA Minimum TTL much more efficiently.
- virtual boost::shared_ptr<Context> findAtOrigin(
+ virtual boost::shared_ptr<ZoneFinder::Context> findAtOrigin(
const isc::dns::RRType& type, bool use_minttl,
FindOptions options);
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index e52d9e9..58af193 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -2466,7 +2466,7 @@ TYPED_TEST(DatabaseClientTest, findDelegation) {
// It should normally just result in DELEGATION; if GLUE_OK is specified,
// the other RR should be visible.
this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("ns.example.com");
+ this->expected_rdatas_.push_back("ns.example.com.");
doFindTest(*finder, Name("brokenns1.example.org"), this->qtype_,
RRType::NS(), this->rrttl_, ZoneFinder::DELEGATION,
this->expected_rdatas_, this->empty_rdatas_,
@@ -2515,7 +2515,7 @@ TYPED_TEST(DatabaseClientTest, findDS) {
// Some insane case: DS under a zone cut. It's included in the DB, but
// shouldn't be visible via finder.
this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("ns.example.com");
+ this->expected_rdatas_.push_back("ns.example.com.");
doFindTest(*finder, Name("child.insecdelegation.example.org"),
RRType::DS(), RRType::NS(), this->rrttl_,
ZoneFinder::DELEGATION, this->expected_rdatas_,
@@ -3649,7 +3649,7 @@ TYPED_TEST(DatabaseClientTest, deleteRRset) {
RRType::CNAME(), this->rrttl_));
this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
this->rrset_->getClass(),
- "www.example.org"));
+ "www.example.org."));
this->updater_->deleteRRset(*this->rrset_);
// The this->updater_ finder should immediately see the deleted results.
@@ -3701,7 +3701,7 @@ TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
RRType::CNAME(), this->rrttl_));
this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
this->rrset_->getClass(),
- "www.example.org"));
+ "www.example.org."));
this->updater_ = this->client_->getUpdater(this->zname_, false);
this->updater_->deleteRRset(*this->rrset_);
diff --git a/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc b/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
index a45c2bd..b36b294 100644
--- a/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
+++ b/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
@@ -632,9 +632,9 @@ addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs) {
checkEncode(RRClass::IN(), RRType::A(), rdata_list_, 0, rrsigs);
ConstRdataPtr mx_rdata1 = createRdata(RRType::MX(), RRClass::IN(),
- "5 mx1.example.com");
+ "5 mx1.example.com.");
ConstRdataPtr mx_rdata2 = createRdata(RRType::MX(), RRClass::IN(),
- "10 mx2.example.com");
+ "10 mx2.example.com.");
rdata_list_.clear();
rdata_list_.push_back(mx_rdata1);
rdata_list_.push_back(mx_rdata2);
@@ -767,13 +767,13 @@ TEST_F(RdataSerializationTest, badAddRdata) {
// Likewise. Inconsistent name compression policy.
const ConstRdataPtr ns_rdata =
- createRdata(RRType::NS(), RRClass::IN(), "ns.example");
+ createRdata(RRType::NS(), RRClass::IN(), "ns.example.");
encoder_.start(RRClass::IN(), RRType::DNAME());
EXPECT_THROW(encoder_.addRdata(*ns_rdata), isc::BadValue);
// Same as the previous one, opposite inconsistency.
const ConstRdataPtr dname_rdata =
- createRdata(RRType::DNAME(), RRClass::IN(), "dname.example");
+ createRdata(RRType::DNAME(), RRClass::IN(), "dname.example.");
encoder_.start(RRClass::IN(), RRType::NS());
EXPECT_THROW(encoder_.addRdata(*dname_rdata), isc::BadValue);
diff --git a/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc b/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
index c331eaa..921ca68 100644
--- a/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
+++ b/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
@@ -354,14 +354,18 @@ TEST_F(TreeNodeRRsetTest, toWire) {
const uint32_t ttl = 0;
const TreeNodeRRset rrset(rrclass_, www_node_, a_rdataset_, true,
&ttl);
- checkToWireResult(expected_renderer, actual_renderer, rrset,
- www_name_,
- textToRRset("www.example.com. 0 IN A 192.0.2.1\n"
- "www.example.com. 0 IN A 192.0.2.2"),
- textToRRset("www.example.com. 0 IN RRSIG "
- "A 5 2 3600 20120814220826 "
- "20120715220826 1234 example.com. FAKE"),
- true);
+ # We need separate variables for the following two: SunStudio cannot
+ // automatically promote RRsetPtr to ConstRRsetPtr in the templated
+ // function.
+ ConstRRsetPtr expected_rrset =
+ textToRRset("www.example.com. 0 IN A 192.0.2.1\n"
+ "www.example.com. 0 IN A 192.0.2.2");
+ ConstRRsetPtr expected_rrsig_rrset =
+ textToRRset("www.example.com. 0 IN RRSIG "
+ "A 5 2 3600 20120814220826 "
+ "20120715220826 1234 example.com. FAKE");
+ checkToWireResult(expected_renderer, actual_renderer, rrset, www_name_,
+ expected_rrset, expected_rrsig_rrset, true);
}
{
diff --git a/src/lib/datasrc/tests/memory/zone_finder_unittest.cc b/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
index 055708d..e59013e 100644
--- a/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
+++ b/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
@@ -110,9 +110,9 @@ protected:
{"example.org. 300 IN A 192.0.2.1", &rr_a_},
{"ns.example.org. 300 IN A 192.0.2.2", &rr_ns_a_},
// This one will place rr_ns_a_ at a zone cut, making it a glue:
- {"ns.example.org. 300 IN NS 192.0.2.2", &rr_ns_ns_},
+ {"ns.example.org. 300 IN NS 192.0.2.2.", &rr_ns_ns_},
{"ns.example.org. 300 IN AAAA 2001:db8::2", &rr_ns_aaaa_},
- {"cname.example.org. 300 IN CNAME canonical.example.org",
+ {"cname.example.org. 300 IN CNAME canonical.example.org.",
&rr_cname_},
{"cname.example.org. 300 IN A 192.0.2.3", &rr_cname_a_},
{"dname.example.org. 300 IN DNAME target.example.org.",
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 81eb3ca..f7a478b 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -347,7 +347,7 @@ public:
{"example.org. 300 IN A 192.0.2.1", &rr_a_},
{"ns.example.org. 300 IN A 192.0.2.2", &rr_ns_a_},
{"ns.example.org. 300 IN AAAA 2001:db8::2", &rr_ns_aaaa_},
- {"cname.example.org. 300 IN CNAME canonical.example.org",
+ {"cname.example.org. 300 IN CNAME canonical.example.org.",
&rr_cname_},
{"cname.example.org. 300 IN A 192.0.2.3", &rr_cname_a_},
{"dname.example.org. 300 IN DNAME target.example.org.",
diff --git a/src/lib/dhcpsrv/lease_mgr.cc b/src/lib/dhcpsrv/lease_mgr.cc
index 6608b14..2310dd4 100644
--- a/src/lib/dhcpsrv/lease_mgr.cc
+++ b/src/lib/dhcpsrv/lease_mgr.cc
@@ -113,11 +113,22 @@ Lease4::toText() const {
bool
Lease4::operator==(const Lease4& other) const {
+ if ( (client_id_ && !other.client_id_) ||
+ (!client_id_ && other.client_id_) ) {
+ // One lease has client-id, but the other doesn't
+ return false;
+ }
+
+ if (client_id_ && other.client_id_ &&
+ *client_id_ != *other.client_id_) {
+ // Different client-ids
+ return false;
+ }
+
return (
addr_ == other.addr_ &&
ext_ == other.ext_ &&
hwaddr_ == other.hwaddr_ &&
- *client_id_ == *other.client_id_ &&
t1_ == other.t1_ &&
t2_ == other.t2_ &&
valid_lft_ == other.valid_lft_ &&
diff --git a/src/lib/dhcpsrv/mysql_lease_mgr.cc b/src/lib/dhcpsrv/mysql_lease_mgr.cc
index 292df61..6b6cde5 100644
--- a/src/lib/dhcpsrv/mysql_lease_mgr.cc
+++ b/src/lib/dhcpsrv/mysql_lease_mgr.cc
@@ -338,12 +338,25 @@ public:
bind_[1].length = &hwaddr_length_;
// client_id: varbinary(128)
- client_id_ = lease_->client_id_->getClientId();
- client_id_length_ = client_id_.size();
- bind_[2].buffer_type = MYSQL_TYPE_BLOB;
- bind_[2].buffer = reinterpret_cast<char*>(&client_id_[0]);
- bind_[2].buffer_length = client_id_length_;
- bind_[2].length = &client_id_length_;
+ if (lease_->client_id_) {
+ client_id_ = lease_->client_id_->getClientId();
+ client_id_length_ = client_id_.size();
+ bind_[2].buffer_type = MYSQL_TYPE_BLOB;
+ bind_[2].buffer = reinterpret_cast<char*>(&client_id_[0]);
+ bind_[2].buffer_length = client_id_length_;
+ bind_[2].length = &client_id_length_;
+ } else {
+ bind_[2].buffer_type = MYSQL_TYPE_NULL;
+
+ // According to http://dev.mysql.com/doc/refman/5.5/en/
+ // c-api-prepared-statement-data-structures.html, the other
+ // fields don't matter if type is set to MYSQL_TYPE_NULL,
+ // but let's set them to some sane values in case earlier versions
+ // didn't have that assumption.
+ static my_bool no_clientid = MLM_TRUE;
+ bind_[2].buffer = NULL;
+ bind_[2].is_null = &no_clientid;
+ }
// valid lifetime: unsigned int
bind_[3].buffer_type = MYSQL_TYPE_LONG;
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 286bd8c..bbf33ed 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -8,6 +8,10 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
CLEANFILES += rrclass.h rrtype.h rrparamregistry.cc rdataclass.h rdataclass.cc
+# These two are created with rrtype/class.h, so not explicitly listed in
+# BUILT_SOURCES.
+CLEANFILES += python/rrtype_constants_inc.cc
+CLEANFILES += python/rrclass_constants_inc.cc
EXTRA_DIST = rrclass-placeholder.h
EXTRA_DIST += rrparamregistry-placeholder.cc
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index fc63d73..b385bf4 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -33,16 +33,43 @@ import sys
# Example:
# new_rdata_factory_users = [('a', 'in'), ('a', 'ch'), ('soa', 'generic')]
new_rdata_factory_users = [('aaaa', 'in'),
+ ('cname', 'generic'),
+ ('dname', 'generic'),
('hinfo', 'generic'),
('naptr', 'generic'),
+ ('mx', 'generic'),
+ ('ns', 'generic'),
+ ('ptr', 'generic'),
('soa', 'generic'),
('spf', 'generic'),
+ ('srv', 'in'),
('txt', 'generic')
]
-re_typecode = re.compile('([\da-z]+)_(\d+)')
+re_typecode = re.compile('([\da-z\-]+)_(\d+)')
classcode2txt = {}
typecode2txt = {}
+# For meta types and types well-known but not implemented. This is a dict from
+# type code values (as string) to textual mnemonic.
+meta_types = {
+ # Real meta types. We won't have Rdata implemented for them, but we need
+ # RRType constants.
+ '251': 'ixfr', '252': 'axfr', '255': 'any',
+ # Obsolete types. We probably won't implement Rdata for them, but it's
+ # better to have RRType constants.
+ '3': 'md', '4': 'mf', '7': 'mb', '8': 'mg', '9': 'mr', '30': 'nxt',
+ '38': 'a6', '254': 'maila',
+ # Types officially assigned but not yet supported in our implementation.
+ '10': 'null', '11': 'wks', '19': 'x25', '21': 'rt', '22': 'nsap',
+ '23': 'nsap-ptr', '24': 'sig', '20': 'isdn', '25': 'key', '26': 'px',
+ '27': 'gpos', '29': 'loc', '36': 'kx', '37': 'cert', '42': 'apl',
+ '45': 'ipseckey', '52': 'tlsa', '55': 'hip', '103': 'unspec',
+ '104': 'nid', '105': 'l32', '106': 'l64', '107': 'lp', '249': 'tkey',
+ '253': 'mailb', '256': 'uri', '257': 'caa'
+ }
+# Classes that don't have any known types. This is a dict from class code
+# values (as string) to textual mnemonic.
+meta_classes = {'254': 'none'}
typeandclass = []
generic_code = 65536 # something larger than any code value
rdata_declarations = ''
@@ -185,11 +212,11 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
type_code = m.group(2)
if not type_code in typecode2txt:
typecode2txt[type_code] = type_txt
- if re.search('\cc$', file):
+ if re.search('\.cc$', file):
if rdatadef_mtime < getmtime(file):
rdatadef_mtime = getmtime(file)
class_definitions += import_classdef(class_txt, file)
- elif re.search('\h$', file):
+ elif re.search('\.h$', file):
if rdatahdr_mtime < getmtime(file):
rdatahdr_mtime = getmtime(file)
rdata_declarations += import_classheader(class_txt,
@@ -255,36 +282,66 @@ class MasterLoaderCallbacks;
def generate_typeclasscode(fileprefix, basemtime, code2txt, type_or_class):
placeholder = '@srcdir@/' + fileprefix + '-placeholder.h'
outputfile = '@builddir@/' + fileprefix + '.h'
+ py_outputfile = '@builddir@/python/' + fileprefix + '_constants_inc.cc'
upper_key = type_or_class.upper() # TYPE or CLASS
lower_key = 'rr' + type_or_class.lower() # rrtype or rrclass
cap_key = type_or_class # Type or Class
- if not need_generate(outputfile, basemtime) and getmtime(outputfile) > getmtime(placeholder):
+ # We only decide whether to generate files for libdns++ files; Python
+ # files are generated if and only if libdns++ files are generated.
+ # In practice it should be sufficient.
+ if (not need_generate(outputfile, basemtime) and
+ getmtime(outputfile) > getmtime(placeholder)):
print('skip generating ' + outputfile)
return
- declarationtxt = ''
- deftxt = ''
- for code in code2txt.keys():
- codetxt = code2txt[code].upper()
- declarationtxt += ' ' * 4 + 'static const RR' + cap_key + '& ' + codetxt + '();\n'
- deftxt += '''inline const RR''' + cap_key + '''&
-RR''' + cap_key + '''::''' + codetxt + '''() {
- static RR''' + cap_key + ''' ''' + lower_key + '''(''' + code + ''');
+ # Create a list of (code, code-text) pairs, where code-text is generally
+ # upper-cased, applying special filters when necessary.
+ def convert(code_txt):
+ # Workaround by heuristics: there's a "NULL" RR type, but it would
+ # cause conflict with the C/C++ macro. We use Null as a special case.
+ if code_txt == 'null':
+ return 'Null'
+ # Likewise, convert "nsap-ptr" to "NSAP_PTR" as a dash cannot be part
+ # of a C/C++ variable.
+ if code_txt == 'nsap-ptr':
+ return 'NSAP_PTR'
+ return code_txt.upper()
+ codes = [ (code, convert(txt)) for code, txt in code2txt.items() ]
+
+ # Dump source code for libdns++
+ with open(placeholder, 'r') as header_temp:
+ with open(outputfile, 'w') as header_out:
+ header_out.write(heading_txt)
+ for line in header_temp:
+ header_out.write(line)
+ if re.match('\s+// BEGIN_WELL_KNOWN_' + upper_key +
+ '_DECLARATIONS$', line):
+ for code in codes:
+ header_out.write(' ' * 4 + 'static const RR' +
+ cap_key + '& ' + code[1] + '();\n')
+ if re.match('// BEGIN_WELL_KNOWN_' + upper_key +
+ '_DEFINITIONS$', line):
+ for code in codes:
+ header_out.write('''inline const RR''' + cap_key +
+ '''&
+RR''' + cap_key + '''::''' + code[1] + '''() {
+ static RR''' + cap_key + ''' ''' + lower_key + '''(''' + code[0] + ''');
return (''' + lower_key + ''');
}\n
-'''
- header_temp = open(placeholder, 'r')
- header_out = open(outputfile, 'w')
- header_out.write(heading_txt)
- for line in header_temp.readlines():
- header_out.write(line)
- if re.match('\s+// BEGIN_WELL_KNOWN_' + upper_key + '_DECLARATIONS$', line):
- header_out.write(declarationtxt)
- if re.match('// BEGIN_WELL_KNOWN_' + upper_key + '_DEFINITIONS$', line):
- header_out.write('\n' + deftxt)
- header_out.close()
- header_temp.close()
+''')
+
+ # Dump source code snippet for isc.dns Python module
+ with open(py_outputfile, 'w') as py_out:
+ py_out.write(" // auto-generated by ../gen-rdatacode.py."
+ " Don't edit this file.\n")
+ py_out.write("\n")
+ for code in codes:
+ py_out.write('''\
+ installClassVariable(''' + lower_key + '''_type, "''' + code[1] + '''",
+ createRR''' + cap_key + '''Object(RR''' + \
+ cap_key + '''::''' + code[1] + '''()));
+''')
def generate_rrparam(fileprefix, basemtime):
placeholder = '@srcdir@/' + fileprefix + '-placeholder.cc'
@@ -331,6 +388,16 @@ def generate_rrparam(fileprefix, basemtime):
typeandclassparams += ', RdataFactoryPtr(new ' + rdf_class + '<'
typeandclassparams += class_txt + '::' + type_utxt + '>()));\n'
+ typeandclassparams += indent + '// Meta and non-implemented RR types\n'
+ for type_code, type_txt in meta_types.items():
+ typeandclassparams += indent + \
+ 'addType("' + type_txt.upper() + '", ' + type_code + ');\n'
+
+ typeandclassparams += indent + '// Meta classes\n'
+ for cls_code, cls_txt in meta_classes.items():
+ typeandclassparams += indent + \
+ 'addClass("' + cls_txt.upper() + '", ' + cls_code + ');\n'
+
rrparam_temp = open(placeholder, 'r')
rrparam_out = open(outputfile, 'w')
rrparam_out.write(heading_txt)
@@ -347,9 +414,14 @@ if __name__ == "__main__":
generate_rdatadef('@builddir@/rdataclass.cc', rdatadef_mtime)
generate_rdatahdr('@builddir@/rdataclass.h', heading_txt,
rdata_declarations, rdatahdr_mtime)
- generate_typeclasscode('rrtype', rdatahdr_mtime, typecode2txt, 'Type')
+
+ # merge auto-generated types/classes with meta maps and generate the
+ # corresponding code.
+ generate_typeclasscode('rrtype', rdatahdr_mtime,
+ dict(typecode2txt, **meta_types), 'Type')
generate_typeclasscode('rrclass', classdir_mtime,
- classcode2txt, 'Class')
+ dict(classcode2txt, **meta_classes), 'Class')
+
generate_rrparam('rrparamregistry', rdatahdr_mtime)
except:
sys.stderr.write('Code generation failed due to exception: %s\n' %
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 50436a9..8d40d9d 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -43,62 +43,12 @@ void Opcode_destroy(s_Opcode* const self);
PyObject* Opcode_getCode(const s_Opcode* const self);
PyObject* Opcode_toText(const s_Opcode* const self);
PyObject* Opcode_str(PyObject* self);
-PyObject* Opcode_QUERY(const s_Opcode* self);
-PyObject* Opcode_IQUERY(const s_Opcode* self);
-PyObject* Opcode_STATUS(const s_Opcode* self);
-PyObject* Opcode_RESERVED3(const s_Opcode* self);
-PyObject* Opcode_NOTIFY(const s_Opcode* self);
-PyObject* Opcode_UPDATE(const s_Opcode* self);
-PyObject* Opcode_RESERVED6(const s_Opcode* self);
-PyObject* Opcode_RESERVED7(const s_Opcode* self);
-PyObject* Opcode_RESERVED8(const s_Opcode* self);
-PyObject* Opcode_RESERVED9(const s_Opcode* self);
-PyObject* Opcode_RESERVED10(const s_Opcode* self);
-PyObject* Opcode_RESERVED11(const s_Opcode* self);
-PyObject* Opcode_RESERVED12(const s_Opcode* self);
-PyObject* Opcode_RESERVED13(const s_Opcode* self);
-PyObject* Opcode_RESERVED14(const s_Opcode* self);
-PyObject* Opcode_RESERVED15(const s_Opcode* self);
-PyObject* Opcode_richcmp(const s_Opcode* const self,
- const s_Opcode* const other, int op);
PyMethodDef Opcode_methods[] = {
{ "get_code", reinterpret_cast<PyCFunction>(Opcode_getCode), METH_NOARGS,
"Returns the code value" },
{ "to_text", reinterpret_cast<PyCFunction>(Opcode_toText), METH_NOARGS,
"Returns the text representation" },
- { "QUERY", reinterpret_cast<PyCFunction>(Opcode_QUERY),
- METH_NOARGS | METH_STATIC, "Creates a QUERY Opcode" },
- { "IQUERY", reinterpret_cast<PyCFunction>(Opcode_IQUERY),
- METH_NOARGS | METH_STATIC, "Creates a IQUERY Opcode" },
- { "STATUS", reinterpret_cast<PyCFunction>(Opcode_STATUS),
- METH_NOARGS | METH_STATIC, "Creates a STATUS Opcode" },
- { "RESERVED3", reinterpret_cast<PyCFunction>(Opcode_RESERVED3),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED3 Opcode" },
- { "NOTIFY", reinterpret_cast<PyCFunction>(Opcode_NOTIFY),
- METH_NOARGS | METH_STATIC, "Creates a NOTIFY Opcode" },
- { "UPDATE", reinterpret_cast<PyCFunction>(Opcode_UPDATE),
- METH_NOARGS | METH_STATIC, "Creates a UPDATE Opcode" },
- { "RESERVED6", reinterpret_cast<PyCFunction>(Opcode_RESERVED6),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED6 Opcode" },
- { "RESERVED7", reinterpret_cast<PyCFunction>(Opcode_RESERVED7),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED7 Opcode" },
- { "RESERVED8", reinterpret_cast<PyCFunction>(Opcode_RESERVED8),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED8 Opcode" },
- { "RESERVED9", reinterpret_cast<PyCFunction>(Opcode_RESERVED9),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED9 Opcode" },
- { "RESERVED10", reinterpret_cast<PyCFunction>(Opcode_RESERVED10),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED10 Opcode" },
- { "RESERVED11", reinterpret_cast<PyCFunction>(Opcode_RESERVED11),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED11 Opcode" },
- { "RESERVED12", reinterpret_cast<PyCFunction>(Opcode_RESERVED12),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED12 Opcode" },
- { "RESERVED13", reinterpret_cast<PyCFunction>(Opcode_RESERVED13),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED13 Opcode" },
- { "RESERVED14", reinterpret_cast<PyCFunction>(Opcode_RESERVED14),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED14 Opcode" },
- { "RESERVED15", reinterpret_cast<PyCFunction>(Opcode_RESERVED15),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED15 Opcode" },
{ NULL, NULL, 0, NULL }
};
@@ -156,96 +106,6 @@ Opcode_str(PyObject* self) {
}
PyObject*
-Opcode_createStatic(const Opcode& opcode) {
- s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
- if (ret != NULL) {
- ret->cppobj = &opcode;
- ret->static_code = true;
- }
- return (ret);
-}
-
-PyObject*
-Opcode_QUERY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::QUERY()));
-}
-
-PyObject*
-Opcode_IQUERY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::IQUERY()));
-}
-
-PyObject*
-Opcode_STATUS(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::STATUS()));
-}
-
-PyObject*
-Opcode_RESERVED3(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED3()));
-}
-
-PyObject*
-Opcode_NOTIFY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::NOTIFY()));
-}
-
-PyObject*
-Opcode_UPDATE(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::UPDATE()));
-}
-
-PyObject*
-Opcode_RESERVED6(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED6()));
-}
-
-PyObject*
-Opcode_RESERVED7(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED7()));
-}
-
-PyObject*
-Opcode_RESERVED8(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED8()));
-}
-
-PyObject*
-Opcode_RESERVED9(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED9()));
-}
-
-PyObject*
-Opcode_RESERVED10(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED10()));
-}
-
-PyObject*
-Opcode_RESERVED11(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED11()));
-}
-
-PyObject*
-Opcode_RESERVED12(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED12()));
-}
-
-PyObject*
-Opcode_RESERVED13(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED13()));
-}
-
-PyObject*
-Opcode_RESERVED14(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED14()));
-}
-
-PyObject*
-Opcode_RESERVED15(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED15()));
-}
-
-PyObject*
Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
const int op)
{
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index c75c737..30dc090 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -294,38 +294,83 @@ initModulePart_Opcode(PyObject* mod) {
return (false);
}
- addClassVariable(opcode_type, "QUERY_CODE",
- Py_BuildValue("h", Opcode::QUERY_CODE));
- addClassVariable(opcode_type, "IQUERY_CODE",
- Py_BuildValue("h", Opcode::IQUERY_CODE));
- addClassVariable(opcode_type, "STATUS_CODE",
- Py_BuildValue("h", Opcode::STATUS_CODE));
- addClassVariable(opcode_type, "RESERVED3_CODE",
- Py_BuildValue("h", Opcode::RESERVED3_CODE));
- addClassVariable(opcode_type, "NOTIFY_CODE",
- Py_BuildValue("h", Opcode::NOTIFY_CODE));
- addClassVariable(opcode_type, "UPDATE_CODE",
- Py_BuildValue("h", Opcode::UPDATE_CODE));
- addClassVariable(opcode_type, "RESERVED6_CODE",
- Py_BuildValue("h", Opcode::RESERVED6_CODE));
- addClassVariable(opcode_type, "RESERVED7_CODE",
- Py_BuildValue("h", Opcode::RESERVED7_CODE));
- addClassVariable(opcode_type, "RESERVED8_CODE",
- Py_BuildValue("h", Opcode::RESERVED8_CODE));
- addClassVariable(opcode_type, "RESERVED9_CODE",
- Py_BuildValue("h", Opcode::RESERVED9_CODE));
- addClassVariable(opcode_type, "RESERVED10_CODE",
- Py_BuildValue("h", Opcode::RESERVED10_CODE));
- addClassVariable(opcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Opcode::RESERVED11_CODE));
- addClassVariable(opcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Opcode::RESERVED12_CODE));
- addClassVariable(opcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Opcode::RESERVED13_CODE));
- addClassVariable(opcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Opcode::RESERVED14_CODE));
- addClassVariable(opcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Opcode::RESERVED15_CODE));
+ try {
+ installClassVariable(opcode_type, "QUERY_CODE",
+ Py_BuildValue("h", Opcode::QUERY_CODE));
+ installClassVariable(opcode_type, "IQUERY_CODE",
+ Py_BuildValue("h", Opcode::IQUERY_CODE));
+ installClassVariable(opcode_type, "STATUS_CODE",
+ Py_BuildValue("h", Opcode::STATUS_CODE));
+ installClassVariable(opcode_type, "RESERVED3_CODE",
+ Py_BuildValue("h", Opcode::RESERVED3_CODE));
+ installClassVariable(opcode_type, "NOTIFY_CODE",
+ Py_BuildValue("h", Opcode::NOTIFY_CODE));
+ installClassVariable(opcode_type, "UPDATE_CODE",
+ Py_BuildValue("h", Opcode::UPDATE_CODE));
+ installClassVariable(opcode_type, "RESERVED6_CODE",
+ Py_BuildValue("h", Opcode::RESERVED6_CODE));
+ installClassVariable(opcode_type, "RESERVED7_CODE",
+ Py_BuildValue("h", Opcode::RESERVED7_CODE));
+ installClassVariable(opcode_type, "RESERVED8_CODE",
+ Py_BuildValue("h", Opcode::RESERVED8_CODE));
+ installClassVariable(opcode_type, "RESERVED9_CODE",
+ Py_BuildValue("h", Opcode::RESERVED9_CODE));
+ installClassVariable(opcode_type, "RESERVED10_CODE",
+ Py_BuildValue("h", Opcode::RESERVED10_CODE));
+ installClassVariable(opcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Opcode::RESERVED11_CODE));
+ installClassVariable(opcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Opcode::RESERVED12_CODE));
+ installClassVariable(opcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Opcode::RESERVED13_CODE));
+ installClassVariable(opcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Opcode::RESERVED14_CODE));
+ installClassVariable(opcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+ installClassVariable(opcode_type, "QUERY",
+ createOpcodeObject(Opcode::QUERY()));
+ installClassVariable(opcode_type, "IQUERY",
+ createOpcodeObject(Opcode::IQUERY()));
+ installClassVariable(opcode_type, "STATUS",
+ createOpcodeObject(Opcode::STATUS()));
+ installClassVariable(opcode_type, "RESERVED3",
+ createOpcodeObject(Opcode::RESERVED3()));
+ installClassVariable(opcode_type, "NOTIFY",
+ createOpcodeObject(Opcode::NOTIFY()));
+ installClassVariable(opcode_type, "UPDATE",
+ createOpcodeObject(Opcode::UPDATE()));
+ installClassVariable(opcode_type, "RESERVED6",
+ createOpcodeObject(Opcode::RESERVED6()));
+ installClassVariable(opcode_type, "RESERVED7",
+ createOpcodeObject(Opcode::RESERVED7()));
+ installClassVariable(opcode_type, "RESERVED8",
+ createOpcodeObject(Opcode::RESERVED8()));
+ installClassVariable(opcode_type, "RESERVED9",
+ createOpcodeObject(Opcode::RESERVED9()));
+ installClassVariable(opcode_type, "RESERVED10",
+ createOpcodeObject(Opcode::RESERVED10()));
+ installClassVariable(opcode_type, "RESERVED11",
+ createOpcodeObject(Opcode::RESERVED11()));
+ installClassVariable(opcode_type, "RESERVED12",
+ createOpcodeObject(Opcode::RESERVED12()));
+ installClassVariable(opcode_type, "RESERVED13",
+ createOpcodeObject(Opcode::RESERVED13()));
+ installClassVariable(opcode_type, "RESERVED14",
+ createOpcodeObject(Opcode::RESERVED14()));
+ installClassVariable(opcode_type, "RESERVED15",
+ createOpcodeObject(Opcode::RESERVED15()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Opcode initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Opcode initialization");
+ return (false);
+ }
return (true);
}
@@ -341,40 +386,87 @@ initModulePart_Rcode(PyObject* mod) {
return (false);
}
- addClassVariable(rcode_type, "NOERROR_CODE",
- Py_BuildValue("h", Rcode::NOERROR_CODE));
- addClassVariable(rcode_type, "FORMERR_CODE",
- Py_BuildValue("h", Rcode::FORMERR_CODE));
- addClassVariable(rcode_type, "SERVFAIL_CODE",
- Py_BuildValue("h", Rcode::SERVFAIL_CODE));
- addClassVariable(rcode_type, "NXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
- addClassVariable(rcode_type, "NOTIMP_CODE",
- Py_BuildValue("h", Rcode::NOTIMP_CODE));
- addClassVariable(rcode_type, "REFUSED_CODE",
- Py_BuildValue("h", Rcode::REFUSED_CODE));
- addClassVariable(rcode_type, "YXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
- addClassVariable(rcode_type, "YXRRSET_CODE",
- Py_BuildValue("h", Rcode::YXRRSET_CODE));
- addClassVariable(rcode_type, "NXRRSET_CODE",
- Py_BuildValue("h", Rcode::NXRRSET_CODE));
- addClassVariable(rcode_type, "NOTAUTH_CODE",
- Py_BuildValue("h", Rcode::NOTAUTH_CODE));
- addClassVariable(rcode_type, "NOTZONE_CODE",
- Py_BuildValue("h", Rcode::NOTZONE_CODE));
- addClassVariable(rcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Rcode::RESERVED11_CODE));
- addClassVariable(rcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Rcode::RESERVED12_CODE));
- addClassVariable(rcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Rcode::RESERVED13_CODE));
- addClassVariable(rcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Rcode::RESERVED14_CODE));
- addClassVariable(rcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Rcode::RESERVED15_CODE));
- addClassVariable(rcode_type, "BADVERS_CODE",
- Py_BuildValue("h", Rcode::BADVERS_CODE));
+ try {
+ installClassVariable(rcode_type, "NOERROR_CODE",
+ Py_BuildValue("h", Rcode::NOERROR_CODE));
+ installClassVariable(rcode_type, "FORMERR_CODE",
+ Py_BuildValue("h", Rcode::FORMERR_CODE));
+ installClassVariable(rcode_type, "SERVFAIL_CODE",
+ Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+ installClassVariable(rcode_type, "NXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+ installClassVariable(rcode_type, "NOTIMP_CODE",
+ Py_BuildValue("h", Rcode::NOTIMP_CODE));
+ installClassVariable(rcode_type, "REFUSED_CODE",
+ Py_BuildValue("h", Rcode::REFUSED_CODE));
+ installClassVariable(rcode_type, "YXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+ installClassVariable(rcode_type, "YXRRSET_CODE",
+ Py_BuildValue("h", Rcode::YXRRSET_CODE));
+ installClassVariable(rcode_type, "NXRRSET_CODE",
+ Py_BuildValue("h", Rcode::NXRRSET_CODE));
+ installClassVariable(rcode_type, "NOTAUTH_CODE",
+ Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+ installClassVariable(rcode_type, "NOTZONE_CODE",
+ Py_BuildValue("h", Rcode::NOTZONE_CODE));
+ installClassVariable(rcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Rcode::RESERVED11_CODE));
+ installClassVariable(rcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Rcode::RESERVED12_CODE));
+ installClassVariable(rcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Rcode::RESERVED13_CODE));
+ installClassVariable(rcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Rcode::RESERVED14_CODE));
+ installClassVariable(rcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Rcode::RESERVED15_CODE));
+ installClassVariable(rcode_type, "BADVERS_CODE",
+ Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+ installClassVariable(rcode_type, "NOERROR",
+ createRcodeObject(Rcode::NOERROR()));
+ installClassVariable(rcode_type, "FORMERR",
+ createRcodeObject(Rcode::FORMERR()));
+ installClassVariable(rcode_type, "SERVFAIL",
+ createRcodeObject(Rcode::SERVFAIL()));
+ installClassVariable(rcode_type, "NXDOMAIN",
+ createRcodeObject(Rcode::NXDOMAIN()));
+ installClassVariable(rcode_type, "NOTIMP",
+ createRcodeObject(Rcode::NOTIMP()));
+ installClassVariable(rcode_type, "REFUSED",
+ createRcodeObject(Rcode::REFUSED()));
+ installClassVariable(rcode_type, "YXDOMAIN",
+ createRcodeObject(Rcode::YXDOMAIN()));
+ installClassVariable(rcode_type, "YXRRSET",
+ createRcodeObject(Rcode::YXRRSET()));
+ installClassVariable(rcode_type, "NXRRSET",
+ createRcodeObject(Rcode::NXRRSET()));
+ installClassVariable(rcode_type, "NOTAUTH",
+ createRcodeObject(Rcode::NOTAUTH()));
+ installClassVariable(rcode_type, "NOTZONE",
+ createRcodeObject(Rcode::NOTZONE()));
+ installClassVariable(rcode_type, "RESERVED11",
+ createRcodeObject(Rcode::RESERVED11()));
+ installClassVariable(rcode_type, "RESERVED12",
+ createRcodeObject(Rcode::RESERVED12()));
+ installClassVariable(rcode_type, "RESERVED13",
+ createRcodeObject(Rcode::RESERVED13()));
+ installClassVariable(rcode_type, "RESERVED14",
+ createRcodeObject(Rcode::RESERVED14()));
+ installClassVariable(rcode_type, "RESERVED15",
+ createRcodeObject(Rcode::RESERVED15()));
+ installClassVariable(rcode_type, "BADVERS",
+ createRcodeObject(Rcode::BADVERS()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Rcode initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Rcode initialization");
+ return (false);
+ }
return (true);
}
@@ -432,6 +524,9 @@ initModulePart_RRClass(PyObject* mod) {
NULL, NULL);
PyObjectContainer(po_IncompleteRRClass).installToModule(
mod, "IncompleteRRClass");
+
+ // Incorporate auto-generated RRClass constants
+#include <dns/python/rrclass_constants_inc.cc>
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in RRClass initialization: " +
@@ -518,6 +613,9 @@ initModulePart_RRType(PyObject* mod) {
NULL, NULL);
PyObjectContainer(po_IncompleteRRType).installToModule(
mod, "IncompleteRRType");
+
+ // Incorporate auto-generated RRType constants
+#include <dns/python/rrtype_constants_inc.cc>
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in RRType initialization: " +
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index 42b48e7..67b45e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -55,23 +55,6 @@ PyObject* Rcode_getCode(const s_Rcode* const self);
PyObject* Rcode_getExtendedCode(const s_Rcode* const self);
PyObject* Rcode_toText(const s_Rcode* const self);
PyObject* Rcode_str(PyObject* self);
-PyObject* Rcode_NOERROR(const s_Rcode* self);
-PyObject* Rcode_FORMERR(const s_Rcode* self);
-PyObject* Rcode_SERVFAIL(const s_Rcode* self);
-PyObject* Rcode_NXDOMAIN(const s_Rcode* self);
-PyObject* Rcode_NOTIMP(const s_Rcode* self);
-PyObject* Rcode_REFUSED(const s_Rcode* self);
-PyObject* Rcode_YXDOMAIN(const s_Rcode* self);
-PyObject* Rcode_YXRRSET(const s_Rcode* self);
-PyObject* Rcode_NXRRSET(const s_Rcode* self);
-PyObject* Rcode_NOTAUTH(const s_Rcode* self);
-PyObject* Rcode_NOTZONE(const s_Rcode* self);
-PyObject* Rcode_RESERVED11(const s_Rcode* self);
-PyObject* Rcode_RESERVED12(const s_Rcode* self);
-PyObject* Rcode_RESERVED13(const s_Rcode* self);
-PyObject* Rcode_RESERVED14(const s_Rcode* self);
-PyObject* Rcode_RESERVED15(const s_Rcode* self);
-PyObject* Rcode_BADVERS(const s_Rcode* self);
PyObject* Rcode_richcmp(const s_Rcode* const self,
const s_Rcode* const other, int op);
@@ -83,40 +66,6 @@ PyMethodDef Rcode_methods[] = {
"Returns the upper 8-bit part of the extended code value" },
{ "to_text", reinterpret_cast<PyCFunction>(Rcode_toText), METH_NOARGS,
"Returns the text representation" },
- { "NOERROR", reinterpret_cast<PyCFunction>(Rcode_NOERROR),
- METH_NOARGS | METH_STATIC, "Creates a NOERROR Rcode" },
- { "FORMERR", reinterpret_cast<PyCFunction>(Rcode_FORMERR),
- METH_NOARGS | METH_STATIC, "Creates a FORMERR Rcode" },
- { "SERVFAIL", reinterpret_cast<PyCFunction>(Rcode_SERVFAIL),
- METH_NOARGS | METH_STATIC, "Creates a SERVFAIL Rcode" },
- { "NXDOMAIN", reinterpret_cast<PyCFunction>(Rcode_NXDOMAIN),
- METH_NOARGS | METH_STATIC, "Creates a NXDOMAIN Rcode" },
- { "NOTIMP", reinterpret_cast<PyCFunction>(Rcode_NOTIMP),
- METH_NOARGS | METH_STATIC, "Creates a NOTIMP Rcode" },
- { "REFUSED", reinterpret_cast<PyCFunction>(Rcode_REFUSED),
- METH_NOARGS | METH_STATIC, "Creates a REFUSED Rcode" },
- { "YXDOMAIN", reinterpret_cast<PyCFunction>(Rcode_YXDOMAIN),
- METH_NOARGS | METH_STATIC, "Creates a YXDOMAIN Rcode" },
- { "YXRRSET", reinterpret_cast<PyCFunction>(Rcode_YXRRSET),
- METH_NOARGS | METH_STATIC, "Creates a YYRRSET Rcode" },
- { "NXRRSET", reinterpret_cast<PyCFunction>(Rcode_NXRRSET),
- METH_NOARGS | METH_STATIC, "Creates a NXRRSET Rcode" },
- { "NOTAUTH", reinterpret_cast<PyCFunction>(Rcode_NOTAUTH),
- METH_NOARGS | METH_STATIC, "Creates a NOTAUTH Rcode" },
- { "NOTZONE", reinterpret_cast<PyCFunction>(Rcode_NOTZONE),
- METH_NOARGS | METH_STATIC, "Creates a NOTZONE Rcode" },
- { "RESERVED11", reinterpret_cast<PyCFunction>(Rcode_RESERVED11),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED11 Rcode" },
- { "RESERVED12", reinterpret_cast<PyCFunction>(Rcode_RESERVED12),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED12 Rcode" },
- { "RESERVED13", reinterpret_cast<PyCFunction>(Rcode_RESERVED13),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED13 Rcode" },
- { "RESERVED14", reinterpret_cast<PyCFunction>(Rcode_RESERVED14),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED14 Rcode" },
- { "RESERVED15", reinterpret_cast<PyCFunction>(Rcode_RESERVED15),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED15 Rcode" },
- { "BADVERS", reinterpret_cast<PyCFunction>(Rcode_BADVERS),
- METH_NOARGS | METH_STATIC, "Creates a BADVERS Rcode" },
{ NULL, NULL, 0, NULL }
};
@@ -193,101 +142,6 @@ Rcode_str(PyObject* self) {
}
PyObject*
-Rcode_createStatic(const Rcode& rcode) {
- s_Rcode* ret = PyObject_New(s_Rcode, &rcode_type);
- if (ret != NULL) {
- ret->cppobj = &rcode;
- ret->static_code = true;
- }
- return (ret);
-}
-
-PyObject*
-Rcode_NOERROR(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOERROR()));
-}
-
-PyObject*
-Rcode_FORMERR(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::FORMERR()));
-}
-
-PyObject*
-Rcode_SERVFAIL(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::SERVFAIL()));
-}
-
-PyObject*
-Rcode_NXDOMAIN(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NXDOMAIN()));
-}
-
-PyObject*
-Rcode_NOTIMP(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTIMP()));
-}
-
-PyObject*
-Rcode_REFUSED(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::REFUSED()));
-}
-
-PyObject*
-Rcode_YXDOMAIN(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::YXDOMAIN()));
-}
-
-PyObject*
-Rcode_YXRRSET(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::YXRRSET()));
-}
-
-PyObject*
-Rcode_NXRRSET(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NXRRSET()));
-}
-
-PyObject*
-Rcode_NOTAUTH(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTAUTH()));
-}
-
-PyObject*
-Rcode_NOTZONE(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTZONE()));
-}
-
-PyObject*
-Rcode_RESERVED11(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED11()));
-}
-
-PyObject*
-Rcode_RESERVED12(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED12()));
-}
-
-PyObject*
-Rcode_RESERVED13(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED13()));
-}
-
-PyObject*
-Rcode_RESERVED14(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED14()));
-}
-
-PyObject*
-Rcode_RESERVED15(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED15()));
-}
-
-PyObject*
-Rcode_BADVERS(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::BADVERS()));
-}
-
-PyObject*
Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
const int op)
{
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index a566f47..d62c88d 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -54,13 +54,6 @@ PyObject* RRClass_getCode(s_RRClass* self);
PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
Py_hash_t RRClass_hash(PyObject* pyself);
-// Static function for direct class creation
-PyObject* RRClass_IN(s_RRClass *self);
-PyObject* RRClass_CH(s_RRClass *self);
-PyObject* RRClass_HS(s_RRClass *self);
-PyObject* RRClass_NONE(s_RRClass *self);
-PyObject* RRClass_ANY(s_RRClass *self);
-
typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
// This list contains the actual set of functions we have in
@@ -81,11 +74,6 @@ PyMethodDef RRClass_methods[] = {
"returned" },
{ "get_code", reinterpret_cast<PyCFunction>(RRClass_getCode), METH_NOARGS,
"Returns the class code as an integer" },
- { "IN", reinterpret_cast<PyCFunction>(RRClass_IN), METH_NOARGS | METH_STATIC, "Creates an IN RRClass" },
- { "CH", reinterpret_cast<PyCFunction>(RRClass_CH), METH_NOARGS | METH_STATIC, "Creates a CH RRClass" },
- { "HS", reinterpret_cast<PyCFunction>(RRClass_HS), METH_NOARGS | METH_STATIC, "Creates an HS RRClass" },
- { "NONE", reinterpret_cast<PyCFunction>(RRClass_NONE), METH_NOARGS | METH_STATIC, "Creates a NONE RRClass" },
- { "ANY", reinterpret_cast<PyCFunction>(RRClass_ANY), METH_NOARGS | METH_STATIC, "Creates an ANY RRClass" },
{ NULL, NULL, 0, NULL }
};
@@ -234,37 +222,6 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
Py_RETURN_FALSE;
}
-//
-// Common function for RRClass_IN/CH/etc.
-//
-PyObject* RRClass_createStatic(RRClass stc) {
- s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
- if (ret != NULL) {
- ret->cppobj = new RRClass(stc);
- }
- return (ret);
-}
-
-PyObject* RRClass_IN(s_RRClass*) {
- return (RRClass_createStatic(RRClass::IN()));
-}
-
-PyObject* RRClass_CH(s_RRClass*) {
- return (RRClass_createStatic(RRClass::CH()));
-}
-
-PyObject* RRClass_HS(s_RRClass*) {
- return (RRClass_createStatic(RRClass::HS()));
-}
-
-PyObject* RRClass_NONE(s_RRClass*) {
- return (RRClass_createStatic(RRClass::NONE()));
-}
-
-PyObject* RRClass_ANY(s_RRClass*) {
- return (RRClass_createStatic(RRClass::ANY()));
-}
-
Py_hash_t
RRClass_hash(PyObject* pyself) {
const s_RRClass* const self = static_cast<s_RRClass*>(pyself);
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 97b66d4..bf705cc 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -50,25 +50,6 @@ PyObject* RRType_toWire(s_RRType* self, PyObject* args);
PyObject* RRType_getCode(s_RRType* self);
PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
Py_hash_t RRType_hash(PyObject* pyself);
-PyObject* RRType_NSEC3PARAM(s_RRType *self);
-PyObject* RRType_DNAME(s_RRType *self);
-PyObject* RRType_PTR(s_RRType *self);
-PyObject* RRType_MX(s_RRType *self);
-PyObject* RRType_DNSKEY(s_RRType *self);
-PyObject* RRType_TXT(s_RRType *self);
-PyObject* RRType_RRSIG(s_RRType *self);
-PyObject* RRType_NSEC(s_RRType *self);
-PyObject* RRType_AAAA(s_RRType *self);
-PyObject* RRType_DS(s_RRType *self);
-PyObject* RRType_OPT(s_RRType *self);
-PyObject* RRType_A(s_RRType *self);
-PyObject* RRType_NS(s_RRType *self);
-PyObject* RRType_CNAME(s_RRType *self);
-PyObject* RRType_SOA(s_RRType *self);
-PyObject* RRType_NSEC3(s_RRType *self);
-PyObject* RRType_IXFR(s_RRType *self);
-PyObject* RRType_AXFR(s_RRType *self);
-PyObject* RRType_ANY(s_RRType *self);
typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
@@ -90,25 +71,6 @@ PyMethodDef RRType_methods[] = {
"returned" },
{ "get_code", reinterpret_cast<PyCFunction>(RRType_getCode), METH_NOARGS,
"Returns the type code as an integer" },
- { "NSEC3PARAM", reinterpret_cast<PyCFunction>(RRType_NSEC3PARAM), METH_NOARGS | METH_STATIC, "Creates an NSEC3PARAM RRType" },
- { "DNAME", reinterpret_cast<PyCFunction>(RRType_DNAME), METH_NOARGS | METH_STATIC, "Creates a DNAME RRType" },
- { "PTR", reinterpret_cast<PyCFunction>(RRType_PTR), METH_NOARGS | METH_STATIC, "Creates a PTR RRType" },
- { "MX", reinterpret_cast<PyCFunction>(RRType_MX), METH_NOARGS | METH_STATIC, "Creates an MX RRType" },
- { "DNSKEY", reinterpret_cast<PyCFunction>(RRType_DNSKEY), METH_NOARGS | METH_STATIC, "Creates a DNSKEY RRType" },
- { "TXT", reinterpret_cast<PyCFunction>(RRType_TXT), METH_NOARGS | METH_STATIC, "Creates a TXT RRType" },
- { "RRSIG", reinterpret_cast<PyCFunction>(RRType_RRSIG), METH_NOARGS | METH_STATIC, "Creates a RRSIG RRType" },
- { "NSEC", reinterpret_cast<PyCFunction>(RRType_NSEC), METH_NOARGS | METH_STATIC, "Creates a NSEC RRType" },
- { "AAAA", reinterpret_cast<PyCFunction>(RRType_AAAA), METH_NOARGS | METH_STATIC, "Creates an AAAA RRType" },
- { "DS", reinterpret_cast<PyCFunction>(RRType_DS), METH_NOARGS | METH_STATIC, "Creates a DS RRType" },
- { "OPT", reinterpret_cast<PyCFunction>(RRType_OPT), METH_NOARGS | METH_STATIC, "Creates an OPT RRType" },
- { "A", reinterpret_cast<PyCFunction>(RRType_A), METH_NOARGS | METH_STATIC, "Creates an A RRType" },
- { "NS", reinterpret_cast<PyCFunction>(RRType_NS), METH_NOARGS | METH_STATIC, "Creates an NS RRType" },
- { "CNAME", reinterpret_cast<PyCFunction>(RRType_CNAME), METH_NOARGS | METH_STATIC, "Creates a CNAME RRType" },
- { "SOA", reinterpret_cast<PyCFunction>(RRType_SOA), METH_NOARGS | METH_STATIC, "Creates a SOA RRType" },
- { "NSEC3", reinterpret_cast<PyCFunction>(RRType_NSEC3), METH_NOARGS | METH_STATIC, "Creates an NSEC3 RRType" },
- { "IXFR", reinterpret_cast<PyCFunction>(RRType_IXFR), METH_NOARGS | METH_STATIC, "Creates an IXFR RRType" },
- { "AXFR", reinterpret_cast<PyCFunction>(RRType_AXFR), METH_NOARGS | METH_STATIC, "Creates an AXFR RRType" },
- { "ANY", reinterpret_cast<PyCFunction>(RRType_ANY), METH_NOARGS | METH_STATIC, "Creates an ANY RRType" },
{ NULL, NULL, 0, NULL }
};
@@ -263,112 +225,6 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
Py_RETURN_FALSE;
}
-//
-// Common function for RRType_A/NS/etc.
-//
-PyObject* RRType_createStatic(RRType stc) {
- s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
- if (ret != NULL) {
- ret->cppobj = new RRType(stc);
- }
- return (ret);
-}
-
-PyObject*
-RRType_NSEC3PARAM(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC3PARAM()));
-}
-
-PyObject*
-RRType_DNAME(s_RRType*) {
- return (RRType_createStatic(RRType::DNAME()));
-}
-
-PyObject*
-RRType_PTR(s_RRType*) {
- return (RRType_createStatic(RRType::PTR()));
-}
-
-PyObject*
-RRType_MX(s_RRType*) {
- return (RRType_createStatic(RRType::MX()));
-}
-
-PyObject*
-RRType_DNSKEY(s_RRType*) {
- return (RRType_createStatic(RRType::DNSKEY()));
-}
-
-PyObject*
-RRType_TXT(s_RRType*) {
- return (RRType_createStatic(RRType::TXT()));
-}
-
-PyObject*
-RRType_RRSIG(s_RRType*) {
- return (RRType_createStatic(RRType::RRSIG()));
-}
-
-PyObject*
-RRType_NSEC(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC()));
-}
-
-PyObject*
-RRType_AAAA(s_RRType*) {
- return (RRType_createStatic(RRType::AAAA()));
-}
-
-PyObject*
-RRType_DS(s_RRType*) {
- return (RRType_createStatic(RRType::DS()));
-}
-
-PyObject*
-RRType_OPT(s_RRType*) {
- return (RRType_createStatic(RRType::OPT()));
-}
-
-PyObject*
-RRType_A(s_RRType*) {
- return (RRType_createStatic(RRType::A()));
-}
-
-PyObject*
-RRType_NS(s_RRType*) {
- return (RRType_createStatic(RRType::NS()));
-}
-
-PyObject*
-RRType_CNAME(s_RRType*) {
- return (RRType_createStatic(RRType::CNAME()));
-}
-
-PyObject*
-RRType_SOA(s_RRType*) {
- return (RRType_createStatic(RRType::SOA()));
-}
-
-PyObject*
-RRType_NSEC3(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC3()));
-}
-
-PyObject*
-RRType_IXFR(s_RRType*) {
- return (RRType_createStatic(RRType::IXFR()));
-}
-
-PyObject*
-RRType_AXFR(s_RRType*) {
- return (RRType_createStatic(RRType::AXFR()));
-}
-
-PyObject*
-RRType_ANY(s_RRType*) {
- return (RRType_createStatic(RRType::ANY()));
-}
-
Py_hash_t
RRType_hash(PyObject* pyself) {
const s_RRType* const self = static_cast<s_RRType*>(pyself);
diff --git a/src/lib/dns/python/tests/edns_python_test.py b/src/lib/dns/python/tests/edns_python_test.py
index b249213..150dfd6 100644
--- a/src/lib/dns/python/tests/edns_python_test.py
+++ b/src/lib/dns/python/tests/edns_python_test.py
@@ -108,8 +108,8 @@ class EDNSTest(unittest.TestCase):
def test_towire_renderer(self):
renderer = MessageRenderer()
- extrcode_noerror = Rcode.NOERROR().get_extended_code()
- extrcode_badvers = Rcode.BADVERS().get_extended_code()
+ extrcode_noerror = Rcode.NOERROR.get_extended_code()
+ extrcode_badvers = Rcode.BADVERS.get_extended_code()
self.assertEqual(1, self.edns_base.to_wire(renderer, extrcode_noerror))
wiredata = read_wire_data("edns_toWire1.wire")
@@ -148,7 +148,7 @@ class EDNSTest(unittest.TestCase):
self.assertEqual(0, renderer.get_length())
def test_towire_buffer(self):
- extrcode_noerror = Rcode.NOERROR().get_extended_code()
+ extrcode_noerror = Rcode.NOERROR.get_extended_code()
obuffer = bytes()
obuffer = self.edns_base.to_wire(obuffer, extrcode_noerror)
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index b9c0d5c..bf39a83 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -59,8 +59,8 @@ LONG_TXT4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012
def create_message():
message_render = Message(Message.RENDER)
message_render.set_qid(0x1035)
- message_render.set_opcode(Opcode.QUERY())
- message_render.set_rcode(Rcode.NOERROR())
+ message_render.set_opcode(Opcode.QUERY)
+ message_render.set_rcode(Rcode.NOERROR)
message_render.set_header_flag(Message.HEADERFLAG_QR)
message_render.set_header_flag(Message.HEADERFLAG_RD)
message_render.set_header_flag(Message.HEADERFLAG_AA)
@@ -161,7 +161,7 @@ class MessageTest(unittest.TestCase):
def test_set_rcode(self):
self.assertRaises(TypeError, self.r.set_rcode, "wrong")
- rcode = Rcode.BADVERS()
+ rcode = Rcode.BADVERS
self.r.set_rcode(rcode)
self.assertEqual(rcode, self.r.get_rcode())
@@ -173,7 +173,7 @@ class MessageTest(unittest.TestCase):
def test_set_opcode(self):
self.assertRaises(TypeError, self.r.set_opcode, "wrong")
- opcode = Opcode.IQUERY()
+ opcode = Opcode.IQUERY
self.r.set_opcode(opcode)
self.assertEqual(opcode, self.r.get_opcode())
@@ -304,8 +304,8 @@ class MessageTest(unittest.TestCase):
self.assertRaises(TypeError, self.r.clear, 3)
def test_clear_question_section(self):
- self.r.add_question(Question(Name("www.example.com"), RRClass.IN(),
- RRType.A()))
+ self.r.add_question(Question(Name("www.example.com"), RRClass.IN,
+ RRType.A))
self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION))
self.r.clear_section(Message.SECTION_QUESTION)
self.assertEqual(0, self.r.get_rr_count(Message.SECTION_QUESTION))
@@ -336,19 +336,19 @@ class MessageTest(unittest.TestCase):
renderer.get_data())
def test_to_wire_without_opcode(self):
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_rcode(Rcode.NOERROR)
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
def test_to_wire_without_rcode(self):
- self.r.set_opcode(Opcode.QUERY())
+ self.r.set_opcode(Opcode.QUERY)
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
def __common_tsigmessage_setup(self, flags=[Message.HEADERFLAG_RD],
rrtype=RRType("A"), answer_data=None):
- self.r.set_opcode(Opcode.QUERY())
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_opcode(Opcode.QUERY)
+ self.r.set_rcode(Rcode.NOERROR)
for flag in flags:
self.r.set_header_flag(flag)
if answer_data is not None:
@@ -407,8 +407,8 @@ class MessageTest(unittest.TestCase):
self.__common_tsig_checks("message_toWire4.wire")
def test_to_wire_tsig_truncation3(self):
- self.r.set_opcode(Opcode.QUERY())
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_opcode(Opcode.QUERY)
+ self.r.set_rcode(Rcode.NOERROR)
for i in range(1, 68):
self.r.add_question(Question(Name("www.example.com"),
RRClass("IN"), RRType(i)))
@@ -469,11 +469,11 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual(msg_str, str(message_render))
def test_to_text_without_opcode(self):
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_rcode(Rcode.NOERROR)
self.assertRaises(InvalidMessageOperation, self.r.to_text)
def test_to_text_without_rcode(self):
- self.r.set_opcode(Opcode.QUERY())
+ self.r.set_opcode(Opcode.QUERY)
self.assertRaises(InvalidMessageOperation, self.r.to_text)
def test_from_wire(self):
@@ -488,8 +488,8 @@ test.example.com. 3600 IN A 192.0.2.2
message_parse = Message(0)
factoryFromFile(message_parse, "message_fromWire1")
self.assertEqual(0x1035, message_parse.get_qid())
- self.assertEqual(Opcode.QUERY(), message_parse.get_opcode())
- self.assertEqual(Rcode.NOERROR(), message_parse.get_rcode())
+ self.assertEqual(Opcode.QUERY, message_parse.get_opcode())
+ self.assertEqual(Rcode.NOERROR, message_parse.get_rcode())
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_QR))
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_RD))
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_AA))
@@ -568,7 +568,7 @@ test.example.com. 3600 IN A 192.0.2.2
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
factoryFromFile(message_parse, "message_fromWire10.wire")
- self.assertEqual(Rcode.BADVERS(), message_parse.get_rcode())
+ self.assertEqual(Rcode.BADVERS, message_parse.get_rcode())
# Maximum extended Rcode
message_parse.clear(Message.PARSE)
diff --git a/src/lib/dns/python/tests/messagerenderer_python_test.py b/src/lib/dns/python/tests/messagerenderer_python_test.py
index 5362496..8d5f26f 100644
--- a/src/lib/dns/python/tests/messagerenderer_python_test.py
+++ b/src/lib/dns/python/tests/messagerenderer_python_test.py
@@ -31,8 +31,8 @@ class MessageRendererTest(unittest.TestCase):
message = Message(Message.RENDER)
message.set_qid(123)
- message.set_opcode(Opcode.QUERY())
- message.set_rcode(Rcode.NOERROR())
+ message.set_opcode(Opcode.QUERY)
+ message.set_rcode(Rcode.NOERROR)
message.add_question(Question(name, c, t))
self.message1 = message
@@ -40,8 +40,8 @@ class MessageRendererTest(unittest.TestCase):
message.set_qid(123)
message.set_header_flag(Message.HEADERFLAG_AA, True)
message.set_header_flag(Message.HEADERFLAG_QR, True)
- message.set_opcode(Opcode.QUERY())
- message.set_rcode(Rcode.NOERROR())
+ message.set_opcode(Opcode.QUERY)
+ message.set_rcode(Rcode.NOERROR)
message.add_question(Question(name, c, t))
rrset = RRset(name, c, t, ttl)
rrset.add_rdata(Rdata(t, c, "192.0.2.98"))
diff --git a/src/lib/dns/python/tests/nsec3hash_python_test.py b/src/lib/dns/python/tests/nsec3hash_python_test.py
index 1a247d0..320529a 100644
--- a/src/lib/dns/python/tests/nsec3hash_python_test.py
+++ b/src/lib/dns/python/tests/nsec3hash_python_test.py
@@ -24,9 +24,9 @@ class NSEC3HashTest(unittest.TestCase):
def setUp(self):
self.nsec3_common = "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(), RRClass.IN(),
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM, RRClass.IN,
"1 0 12 aabbccdd"))
- self.test_hash_nsec3 = NSEC3Hash(Rdata(RRType.NSEC3(), RRClass.IN(),
+ self.test_hash_nsec3 = NSEC3Hash(Rdata(RRType.NSEC3, RRClass.IN,
"1 0 12 aabbccdd " +
self.nsec3_common))
def test_bad_construct(self):
@@ -37,20 +37,20 @@ class NSEC3HashTest(unittest.TestCase):
self.assertRaises(TypeError, NSEC3Hash, "1 0 12 aabbccdd")
# additional parameter
- self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(),
+ self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.NSEC3PARAM,
+ RRClass.IN,
"1 0 12 aabbccdd"), 1)
# Invaid type of RDATA
- self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.A(), RRClass.IN(),
+ self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.A, RRClass.IN,
"192.0.2.1"))
def test_unknown_algorithm(self):
self.assertRaises(UnknownNSEC3HashAlgorithm, NSEC3Hash,
- Rdata(RRType.NSEC3PARAM(), RRClass.IN(),
+ Rdata(RRType.NSEC3PARAM, RRClass.IN,
"2 0 12 aabbccdd"))
self.assertRaises(UnknownNSEC3HashAlgorithm, NSEC3Hash,
- Rdata(RRType.NSEC3(), RRClass.IN(),
+ Rdata(RRType.NSEC3, RRClass.IN,
"2 0 12 aabbccdd " + self.nsec3_common))
def calculate_check(self, hash):
@@ -71,15 +71,15 @@ class NSEC3HashTest(unittest.TestCase):
# Using unusually large iterations, something larger than the 8-bit
#range. (expected hash value generated by BIND 9's dnssec-signzone)
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(), "1 0 256 AABBCCDD"))
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM,
+ RRClass.IN, "1 0 256 AABBCCDD"))
self.assertEqual("COG6A52MJ96MNMV3QUCAGGCO0RHCC2Q3",
self.test_hash.calculate(Name("example.org")))
# Some boundary cases: 0-iteration and empty salt. Borrowed from the
# .com zone data.
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(),"1 0 0 -"))
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM,
+ RRClass.IN,"1 0 0 -"))
self.assertEqual("CK0POJMG874LJREF7EFN8430QVIT8BSM",
self.test_hash.calculate(Name("com")))
@@ -90,39 +90,39 @@ class NSEC3HashTest(unittest.TestCase):
def check_match(self, hash, rrtype, postfix):
# If all parameters match, it's considered to be matched.
- self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 aabbccdd" + postfix)))
# Algorithm doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"2 0 12 aabbccdd" + postfix)))
# Iterations doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 1 aabbccdd" + postfix)))
# Salt doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 aabbccde" + postfix)))
# Salt doesn't match: the other has an empty salt
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 -" + postfix)))
# Flag doesn't matter
- self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN,
"1 1 12 aabbccdd" + postfix)))
def test_match(self):
- self.check_match(self.test_hash, RRType.NSEC3(),
+ self.check_match(self.test_hash, RRType.NSEC3,
" " + self.nsec3_common)
- self.check_match(self.test_hash_nsec3, RRType.NSEC3(),
+ self.check_match(self.test_hash_nsec3, RRType.NSEC3,
" " + self.nsec3_common)
- self.check_match(self.test_hash, RRType.NSEC3PARAM(), "")
- self.check_match(self.test_hash_nsec3, RRType.NSEC3PARAM(), "")
+ self.check_match(self.test_hash, RRType.NSEC3PARAM, "")
+ self.check_match(self.test_hash_nsec3, RRType.NSEC3PARAM, "")
# bad parameter checks
self.assertRaises(TypeError, self.test_hash.match, 1)
self.assertRaises(TypeError, self.test_hash.match,
- Rdata(RRType.NSEC3(), RRClass.IN(),
+ Rdata(RRType.NSEC3, RRClass.IN,
"1 0 12 aabbccdd " + self.nsec3_common), 1)
self.assertRaises(TypeError, self.test_hash.match,
- Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
+ Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/opcode_python_test.py b/src/lib/dns/python/tests/opcode_python_test.py
index 84f449f..d938aa6 100644
--- a/src/lib/dns/python/tests/opcode_python_test.py
+++ b/src/lib/dns/python/tests/opcode_python_test.py
@@ -34,53 +34,53 @@ class OpcodeTest(unittest.TestCase):
self.assertEqual(Opcode.UPDATE_CODE, Opcode(5).get_code())
self.assertEqual(Opcode.RESERVED15_CODE, Opcode(15).get_code())
- self.assertEqual(Opcode.QUERY_CODE, Opcode.QUERY().get_code())
- self.assertEqual(Opcode.IQUERY_CODE, Opcode.IQUERY().get_code())
- self.assertEqual(Opcode.NOTIFY_CODE, Opcode.NOTIFY().get_code())
- self.assertEqual(Opcode.UPDATE_CODE, Opcode.UPDATE().get_code())
- self.assertEqual(Opcode.RESERVED15_CODE, Opcode.RESERVED15().get_code())
+ self.assertEqual(Opcode.QUERY_CODE, Opcode.QUERY.get_code())
+ self.assertEqual(Opcode.IQUERY_CODE, Opcode.IQUERY.get_code())
+ self.assertEqual(Opcode.NOTIFY_CODE, Opcode.NOTIFY.get_code())
+ self.assertEqual(Opcode.UPDATE_CODE, Opcode.UPDATE.get_code())
+ self.assertEqual(Opcode.RESERVED15_CODE, Opcode.RESERVED15.get_code())
def test_get_code(self):
- self.assertEqual(0, Opcode.QUERY().get_code())
- self.assertEqual(1, Opcode.IQUERY().get_code())
- self.assertEqual(2, Opcode.STATUS().get_code())
- self.assertEqual(3, Opcode.RESERVED3().get_code())
- self.assertEqual(4, Opcode.NOTIFY().get_code())
- self.assertEqual(5, Opcode.UPDATE().get_code())
- self.assertEqual(6, Opcode.RESERVED6().get_code())
- self.assertEqual(7, Opcode.RESERVED7().get_code())
- self.assertEqual(8, Opcode.RESERVED8().get_code())
- self.assertEqual(9, Opcode.RESERVED9().get_code())
- self.assertEqual(10, Opcode.RESERVED10().get_code())
- self.assertEqual(11, Opcode.RESERVED11().get_code())
- self.assertEqual(12, Opcode.RESERVED12().get_code())
- self.assertEqual(13, Opcode.RESERVED13().get_code())
- self.assertEqual(14, Opcode.RESERVED14().get_code())
- self.assertEqual(15, Opcode.RESERVED15().get_code())
+ self.assertEqual(0, Opcode.QUERY.get_code())
+ self.assertEqual(1, Opcode.IQUERY.get_code())
+ self.assertEqual(2, Opcode.STATUS.get_code())
+ self.assertEqual(3, Opcode.RESERVED3.get_code())
+ self.assertEqual(4, Opcode.NOTIFY.get_code())
+ self.assertEqual(5, Opcode.UPDATE.get_code())
+ self.assertEqual(6, Opcode.RESERVED6.get_code())
+ self.assertEqual(7, Opcode.RESERVED7.get_code())
+ self.assertEqual(8, Opcode.RESERVED8.get_code())
+ self.assertEqual(9, Opcode.RESERVED9.get_code())
+ self.assertEqual(10, Opcode.RESERVED10.get_code())
+ self.assertEqual(11, Opcode.RESERVED11.get_code())
+ self.assertEqual(12, Opcode.RESERVED12.get_code())
+ self.assertEqual(13, Opcode.RESERVED13.get_code())
+ self.assertEqual(14, Opcode.RESERVED14.get_code())
+ self.assertEqual(15, Opcode.RESERVED15.get_code())
def test_to_text(self):
- self.assertEqual("QUERY", Opcode.QUERY().to_text())
- self.assertEqual("QUERY", str(Opcode.QUERY()))
- self.assertEqual("IQUERY", Opcode.IQUERY().to_text())
- self.assertEqual("STATUS", Opcode.STATUS().to_text())
- self.assertEqual("RESERVED3", Opcode.RESERVED3().to_text())
- self.assertEqual("NOTIFY", Opcode.NOTIFY().to_text())
- self.assertEqual("UPDATE", Opcode.UPDATE().to_text())
- self.assertEqual("RESERVED6", Opcode.RESERVED6().to_text())
- self.assertEqual("RESERVED7", Opcode.RESERVED7().to_text())
- self.assertEqual("RESERVED8", Opcode.RESERVED8().to_text())
- self.assertEqual("RESERVED9", Opcode.RESERVED9().to_text())
- self.assertEqual("RESERVED10", Opcode.RESERVED10().to_text())
- self.assertEqual("RESERVED11", Opcode.RESERVED11().to_text())
- self.assertEqual("RESERVED12", Opcode.RESERVED12().to_text())
- self.assertEqual("RESERVED13", Opcode.RESERVED13().to_text())
- self.assertEqual("RESERVED14", Opcode.RESERVED14().to_text())
- self.assertEqual("RESERVED15", Opcode.RESERVED15().to_text())
+ self.assertEqual("QUERY", Opcode.QUERY.to_text())
+ self.assertEqual("QUERY", str(Opcode.QUERY))
+ self.assertEqual("IQUERY", Opcode.IQUERY.to_text())
+ self.assertEqual("STATUS", Opcode.STATUS.to_text())
+ self.assertEqual("RESERVED3", Opcode.RESERVED3.to_text())
+ self.assertEqual("NOTIFY", Opcode.NOTIFY.to_text())
+ self.assertEqual("UPDATE", Opcode.UPDATE.to_text())
+ self.assertEqual("RESERVED6", Opcode.RESERVED6.to_text())
+ self.assertEqual("RESERVED7", Opcode.RESERVED7.to_text())
+ self.assertEqual("RESERVED8", Opcode.RESERVED8.to_text())
+ self.assertEqual("RESERVED9", Opcode.RESERVED9.to_text())
+ self.assertEqual("RESERVED10", Opcode.RESERVED10.to_text())
+ self.assertEqual("RESERVED11", Opcode.RESERVED11.to_text())
+ self.assertEqual("RESERVED12", Opcode.RESERVED12.to_text())
+ self.assertEqual("RESERVED13", Opcode.RESERVED13.to_text())
+ self.assertEqual("RESERVED14", Opcode.RESERVED14.to_text())
+ self.assertEqual("RESERVED15", Opcode.RESERVED15.to_text())
def test_richcmp(self):
- o1 = Opcode.QUERY()
- o2 = Opcode.NOTIFY()
- o3 = Opcode.NOTIFY()
+ o1 = Opcode.QUERY
+ o2 = Opcode.NOTIFY
+ o3 = Opcode.NOTIFY
self.assertTrue(o2 == o3)
self.assertFalse(o2 != o3)
self.assertTrue(o1 != o2)
diff --git a/src/lib/dns/python/tests/rcode_python_test.py b/src/lib/dns/python/tests/rcode_python_test.py
index 77fed3a..c4a8067 100644
--- a/src/lib/dns/python/tests/rcode_python_test.py
+++ b/src/lib/dns/python/tests/rcode_python_test.py
@@ -54,36 +54,36 @@ class RcodeTest(unittest.TestCase):
self.assertEqual(Rcode.RESERVED15_CODE, Rcode(15).get_code())
self.assertEqual(Rcode.BADVERS_CODE, Rcode(16).get_code())
- self.assertEqual(Rcode.NOERROR_CODE, Rcode.NOERROR().get_code())
- self.assertEqual(Rcode.FORMERR_CODE, Rcode.FORMERR().get_code())
- self.assertEqual(Rcode.NOTIMP_CODE, Rcode.NOTIMP().get_code())
- self.assertEqual(Rcode.REFUSED_CODE, Rcode.REFUSED().get_code())
- self.assertEqual(Rcode.RESERVED15_CODE, Rcode.RESERVED15().get_code())
- self.assertEqual(Rcode.BADVERS_CODE, Rcode.BADVERS().get_code())
+ self.assertEqual(Rcode.NOERROR_CODE, Rcode.NOERROR.get_code())
+ self.assertEqual(Rcode.FORMERR_CODE, Rcode.FORMERR.get_code())
+ self.assertEqual(Rcode.NOTIMP_CODE, Rcode.NOTIMP.get_code())
+ self.assertEqual(Rcode.REFUSED_CODE, Rcode.REFUSED.get_code())
+ self.assertEqual(Rcode.RESERVED15_CODE, Rcode.RESERVED15.get_code())
+ self.assertEqual(Rcode.BADVERS_CODE, Rcode.BADVERS.get_code())
def test_get_code(self):
- self.assertEqual(0, Rcode.NOERROR().get_code())
- self.assertEqual(1, Rcode.FORMERR().get_code())
- self.assertEqual(2, Rcode.SERVFAIL().get_code())
- self.assertEqual(3, Rcode.NXDOMAIN().get_code())
- self.assertEqual(4, Rcode.NOTIMP().get_code())
- self.assertEqual(5, Rcode.REFUSED().get_code())
- self.assertEqual(6, Rcode.YXDOMAIN().get_code())
- self.assertEqual(7, Rcode.YXRRSET().get_code())
- self.assertEqual(8, Rcode.NXRRSET().get_code())
- self.assertEqual(9, Rcode.NOTAUTH().get_code())
- self.assertEqual(10, Rcode.NOTZONE().get_code())
- self.assertEqual(11, Rcode.RESERVED11().get_code())
- self.assertEqual(12, Rcode.RESERVED12().get_code())
- self.assertEqual(13, Rcode.RESERVED13().get_code())
- self.assertEqual(14, Rcode.RESERVED14().get_code())
- self.assertEqual(15, Rcode.RESERVED15().get_code())
- self.assertEqual(16, Rcode.BADVERS().get_code())
+ self.assertEqual(0, Rcode.NOERROR.get_code())
+ self.assertEqual(1, Rcode.FORMERR.get_code())
+ self.assertEqual(2, Rcode.SERVFAIL.get_code())
+ self.assertEqual(3, Rcode.NXDOMAIN.get_code())
+ self.assertEqual(4, Rcode.NOTIMP.get_code())
+ self.assertEqual(5, Rcode.REFUSED.get_code())
+ self.assertEqual(6, Rcode.YXDOMAIN.get_code())
+ self.assertEqual(7, Rcode.YXRRSET.get_code())
+ self.assertEqual(8, Rcode.NXRRSET.get_code())
+ self.assertEqual(9, Rcode.NOTAUTH.get_code())
+ self.assertEqual(10, Rcode.NOTZONE.get_code())
+ self.assertEqual(11, Rcode.RESERVED11.get_code())
+ self.assertEqual(12, Rcode.RESERVED12.get_code())
+ self.assertEqual(13, Rcode.RESERVED13.get_code())
+ self.assertEqual(14, Rcode.RESERVED14.get_code())
+ self.assertEqual(15, Rcode.RESERVED15.get_code())
+ self.assertEqual(16, Rcode.BADVERS.get_code())
def test_get_extended_code(self):
- self.assertEqual(0, Rcode.NOERROR().get_extended_code())
- self.assertEqual(0, Rcode.YXRRSET().get_extended_code())
- self.assertEqual(1, Rcode.BADVERS().get_extended_code())
+ self.assertEqual(0, Rcode.NOERROR.get_extended_code())
+ self.assertEqual(0, Rcode.YXRRSET.get_extended_code())
+ self.assertEqual(1, Rcode.BADVERS.get_extended_code())
self.assertEqual(0xab, Rcode(0xabf).get_extended_code())
self.assertEqual(0xff, Rcode(0xfff).get_extended_code())
@@ -107,13 +107,13 @@ class RcodeTest(unittest.TestCase):
self.assertEqual("RESERVED15", Rcode(15).to_text())
self.assertEqual("BADVERS", Rcode(16).to_text())
- self.assertEqual("17", Rcode(Rcode.BADVERS().get_code() + 1).to_text())
+ self.assertEqual("17", Rcode(Rcode.BADVERS.get_code() + 1).to_text())
self.assertEqual("4095", Rcode(0xfff).to_text())
def test_richcmp(self):
- r1 = Rcode.NOERROR()
- r2 = Rcode.FORMERR()
- r3 = Rcode.FORMERR()
+ r1 = Rcode.NOERROR
+ r2 = Rcode.FORMERR
+ r3 = Rcode.FORMERR
self.assertTrue(r2 == r3)
self.assertTrue(r1 != r2)
self.assertFalse(r1 == r2)
diff --git a/src/lib/dns/python/tests/rrclass_python_test.py b/src/lib/dns/python/tests/rrclass_python_test.py
index a048c4c..880e331 100644
--- a/src/lib/dns/python/tests/rrclass_python_test.py
+++ b/src/lib/dns/python/tests/rrclass_python_test.py
@@ -23,8 +23,8 @@ from pydnspp import *
class RRClassTest(unittest.TestCase):
def setUp(self):
- self.c1 = RRClass.IN()
- self.c2 = RRClass.CH()
+ self.c1 = RRClass.IN
+ self.c2 = RRClass.CH
def test_init(self):
self.assertRaises(InvalidRRClass, RRClass, "wrong")
@@ -81,17 +81,17 @@ class RRClassTest(unittest.TestCase):
def test_hash(self):
# Exploiting the knowledge that the hash value is the numeric class
# value, we can predict the comparison result.
- self.assertEqual(hash(RRClass.IN()), hash(RRClass("IN")))
+ self.assertEqual(hash(RRClass.IN), hash(RRClass("IN")))
self.assertEqual(hash(RRClass("in")), hash(RRClass("IN")))
- self.assertNotEqual(hash(RRClass.IN()), hash(RRClass.CH()))
- self.assertNotEqual(hash(RRClass.IN()), hash(RRClass("CLASS65535")))
+ self.assertNotEqual(hash(RRClass.IN), hash(RRClass.CH))
+ self.assertNotEqual(hash(RRClass.IN), hash(RRClass("CLASS65535")))
def test_statics(self):
- self.assertEqual(RRClass.IN(), RRClass("IN"))
- self.assertEqual(RRClass.CH(), RRClass("CH"))
- self.assertEqual(RRClass.HS(), RRClass("HS"))
- self.assertEqual(254, RRClass.NONE().get_code())
- self.assertEqual(255, RRClass.ANY().get_code())
+ self.assertEqual(RRClass.IN, RRClass("IN"))
+ self.assertEqual(RRClass.CH, RRClass("CH"))
+ self.assertEqual(RRClass.HS, RRClass("HS"))
+ self.assertEqual(254, RRClass.NONE.get_code())
+ self.assertEqual(255, RRClass.ANY.get_code())
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrset_collection_python_test.py b/src/lib/dns/python/tests/rrset_collection_python_test.py
index 2cf286e..1bbbc80 100644
--- a/src/lib/dns/python/tests/rrset_collection_python_test.py
+++ b/src/lib/dns/python/tests/rrset_collection_python_test.py
@@ -34,64 +34,64 @@ class RRsetCollectionTest(unittest.TestCase):
self.assertRaises(TypeError, RRsetCollection, 1)
self.assertRaises(TypeError, RRsetCollection, # extra arg
b'example. 0 A 192.0.2.1',
- Name('example'), RRClass.IN(), 1)
+ Name('example'), RRClass.IN, 1)
self.assertRaises(TypeError, RRsetCollection, # incorrect order
- b'example. 0 A 192.0.2.1', RRClass.IN(),
+ b'example. 0 A 192.0.2.1', RRClass.IN,
Name('example'))
# constructor will result in C++ exception.
self.assertRaises(IscException, RRsetCollection,
TESTDATA_DIR + '/no_such_file', Name('example.org'),
- RRClass.IN())
+ RRClass.IN)
def check_find_result(self, rrsets):
# Commonly used check pattern
- found = rrsets.find(Name('www.example.org'), RRClass.IN(), RRType.A())
+ found = rrsets.find(Name('www.example.org'), RRClass.IN, RRType.A)
self.assertNotEqual(None, found)
self.assertEqual(Name('www.example.org'), found.get_name())
- self.assertEqual(RRClass.IN(), found.get_class())
- self.assertEqual(RRType.A(), found.get_type())
+ self.assertEqual(RRClass.IN, found.get_class())
+ self.assertEqual(RRType.A, found.get_type())
self.assertEqual('192.0.2.1', found.get_rdata()[0].to_text())
def test_find(self):
# Checking the underlying find() is called as intended, both for
# success and failure cases, and with two different constructors.
rrsets = RRsetCollection(TESTDATA_DIR + '/example.org',
- Name('example.org'), RRClass.IN())
+ Name('example.org'), RRClass.IN)
self.check_find_result(rrsets)
- self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN(),
- RRType.A()))
+ self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN,
+ RRType.A))
rrsets = RRsetCollection(b'www.example.org. 3600 IN A 192.0.2.1',
- Name('example.org'), RRClass.IN())
+ Name('example.org'), RRClass.IN)
self.check_find_result(rrsets)
- self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN(),
- RRType.A()))
+ self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN,
+ RRType.A))
def test_find_badargs(self):
rrsets = RRsetCollection()
# Check bad arguments: bad types
- self.assertRaises(TypeError, rrsets.find, 1, RRClass.IN(), RRType.A())
+ self.assertRaises(TypeError, rrsets.find, 1, RRClass.IN, RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'), 1,
- RRType.A())
+ RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'), 1,
- RRType.A())
+ RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN(), 1)
- self.assertRaises(TypeError, rrsets.find, Name('example'), RRType.A(),
- RRClass.IN())
+ RRClass.IN, 1)
+ self.assertRaises(TypeError, rrsets.find, Name('example'), RRType.A,
+ RRClass.IN)
# Check bad arguments: too many/few arguments
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN(), RRType.A(), 0)
+ RRClass.IN, RRType.A, 0)
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN())
+ RRClass.IN)
def test_add_remove_rrset(self):
name = Name('www.example.org')
- rrclass = RRClass.IN()
- rrtype = RRType.A()
+ rrclass = RRClass.IN
+ rrtype = RRType.A
# Create a collection with no RRsets
rrsets = RRsetCollection()
@@ -134,7 +134,7 @@ class RRsetCollectionTest(unittest.TestCase):
pass
rrsets = EmptyRRsetCollection()
self.assertRaises(TypeError, rrsets.find, Name('www.example.org'),
- RRClass.IN(), RRType.A())
+ RRClass.IN, RRType.A)
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrset_python_test.py b/src/lib/dns/python/tests/rrset_python_test.py
index 0544872..010b60c 100644
--- a/src/lib/dns/python/tests/rrset_python_test.py
+++ b/src/lib/dns/python/tests/rrset_python_test.py
@@ -23,7 +23,7 @@ import os
from pydnspp import *
class TestModuleSpec(unittest.TestCase):
-
+
def setUp(self):
self.test_name = Name("test.example.com")
self.test_domain = Name("example.com")
@@ -78,8 +78,8 @@ class TestModuleSpec(unittest.TestCase):
def test_add_rdata(self):
# no iterator to read out yet (TODO: add addition test once implemented)
- self.assertRaises(TypeError, self.rrset_a.add_rdata, Rdata(RRType("NS"), RRClass("IN"), "test.name"))
- pass
+ self.assertRaises(TypeError, self.rrset_a.add_rdata,
+ Rdata(RRType("NS"), RRClass("IN"), "test.name."))
def test_to_text(self):
self.assertEqual("test.example.com. 3600 IN A 192.0.2.1\n"
@@ -126,6 +126,6 @@ class TestModuleSpec(unittest.TestCase):
# they would leak.
self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()))
self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()[0]))
-
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrtype_python_test.py b/src/lib/dns/python/tests/rrtype_python_test.py
index 4548b50..7d20136 100644
--- a/src/lib/dns/python/tests/rrtype_python_test.py
+++ b/src/lib/dns/python/tests/rrtype_python_test.py
@@ -119,35 +119,35 @@ class TestModuleSpec(unittest.TestCase):
def test_hash(self):
# Exploiting the knowledge that the hash value is the numeric class
# value, we can predict the comparison result.
- self.assertEqual(hash(RRType.AAAA()), hash(RRType("AAAA")))
+ self.assertEqual(hash(RRType.AAAA), hash(RRType("AAAA")))
self.assertEqual(hash(RRType("aaaa")), hash(RRType("AAAA")))
self.assertEqual(hash(RRType(28)), hash(RRType("AAAA")))
- self.assertNotEqual(hash(RRType.A()), hash(RRType.NS()))
- self.assertNotEqual(hash(RRType.AAAA()), hash(RRType("Type65535")))
+ self.assertNotEqual(hash(RRType.A), hash(RRType.NS))
+ self.assertNotEqual(hash(RRType.AAAA), hash(RRType("Type65535")))
def test_statics(self):
- self.assertEqual(RRType("NSEC3PARAM"), RRType.NSEC3PARAM())
- self.assertEqual(RRType("DNAME"), RRType.DNAME())
- self.assertEqual(RRType("PTR"), RRType.PTR())
- self.assertEqual(RRType("MX"), RRType.MX())
- self.assertEqual(RRType("DNSKEY"), RRType.DNSKEY())
- self.assertEqual(RRType("TXT"), RRType.TXT())
- self.assertEqual(RRType("RRSIG"), RRType.RRSIG())
- self.assertEqual(RRType("NSEC"), RRType.NSEC())
- self.assertEqual(RRType("AAAA"), RRType.AAAA())
- self.assertEqual(RRType("DS"), RRType.DS())
- self.assertEqual(RRType("OPT"), RRType.OPT())
- self.assertEqual(RRType("A"), RRType.A())
- self.assertEqual(RRType("NS"), RRType.NS())
- self.assertEqual(RRType("CNAME"), RRType.CNAME())
- self.assertEqual(RRType("SOA"), RRType.SOA())
- self.assertEqual(RRType("NSEC3"), RRType.NSEC3())
+ self.assertEqual(RRType("NSEC3PARAM"), RRType.NSEC3PARAM)
+ self.assertEqual(RRType("DNAME"), RRType.DNAME)
+ self.assertEqual(RRType("PTR"), RRType.PTR)
+ self.assertEqual(RRType("MX"), RRType.MX)
+ self.assertEqual(RRType("DNSKEY"), RRType.DNSKEY)
+ self.assertEqual(RRType("TXT"), RRType.TXT)
+ self.assertEqual(RRType("RRSIG"), RRType.RRSIG)
+ self.assertEqual(RRType("NSEC"), RRType.NSEC)
+ self.assertEqual(RRType("AAAA"), RRType.AAAA)
+ self.assertEqual(RRType("DS"), RRType.DS)
+ self.assertEqual(RRType("OPT"), RRType.OPT)
+ self.assertEqual(RRType("A"), RRType.A)
+ self.assertEqual(RRType("NS"), RRType.NS)
+ self.assertEqual(RRType("CNAME"), RRType.CNAME)
+ self.assertEqual(RRType("SOA"), RRType.SOA)
+ self.assertEqual(RRType("NSEC3"), RRType.NSEC3)
# these can't be built with string input
# (see the original cpp TODO)
- self.assertEqual(251, RRType.IXFR().get_code())
- self.assertEqual(252, RRType.AXFR().get_code())
- self.assertEqual(255, RRType.ANY().get_code())
+ self.assertEqual(251, RRType.IXFR.get_code())
+ self.assertEqual(252, RRType.AXFR.get_code())
+ self.assertEqual(255, RRType.ANY.get_code())
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/tsig_python_test.py b/src/lib/dns/python/tests/tsig_python_test.py
index 4d99175..282431c 100644
--- a/src/lib/dns/python/tests/tsig_python_test.py
+++ b/src/lib/dns/python/tests/tsig_python_test.py
@@ -40,7 +40,7 @@ class TSIGContextTest(unittest.TestCase):
self.keyring = TSIGKeyRing()
self.message = Message(Message.RENDER)
self.renderer = MessageRenderer()
- self.test_class = RRClass.IN()
+ self.test_class = RRClass.IN
self.test_ttl = RRTTL(86400)
self.secret = base64.b64decode(b"SFuWd/q99SzF8Yzd1QbB9g==")
self.tsig_ctx = TSIGContext(TSIGKey(self.test_name,
@@ -59,12 +59,12 @@ class TSIGContextTest(unittest.TestCase):
# Note: intentionally use camelCase so that we can easily copy-paste
# corresponding C++ tests.
def createMessageAndSign(self, id, qname, ctx, message_flags=RD_FLAG,
- qtype=RRType.A(), answer_data=None,
+ qtype=RRType.A, answer_data=None,
answer_type=None, add_question=True,
- rcode=Rcode.NOERROR()):
+ rcode=Rcode.NOERROR):
self.message.clear(Message.RENDER)
self.message.set_qid(id)
- self.message.set_opcode(Opcode.QUERY())
+ self.message.set_opcode(Opcode.QUERY)
self.message.set_rcode(rcode)
if (message_flags & QR_FLAG) != 0:
self.message.set_header_flag(Message.HEADERFLAG_QR)
@@ -120,7 +120,7 @@ class TSIGContextTest(unittest.TestCase):
self.assertEqual(TSIGContext.STATE_INIT, self.tsig_ctx.get_state())
# And there should be no error code.
- self.assertEqual(TSIGError(Rcode.NOERROR()), self.tsig_ctx.get_error())
+ self.assertEqual(TSIGError(Rcode.NOERROR), self.tsig_ctx.get_error())
# No message signed yet
self.assertRaises(TSIGContextError, self.tsig_ctx.last_had_signature)
@@ -249,7 +249,7 @@ class TSIGContextTest(unittest.TestCase):
tsig = self.createMessageAndSign(self.qid, self.test_name,
self.tsig_verify_ctx,
QR_FLAG|AA_FLAG|RD_FLAG,
- RRType.A(), "192.0.2.1")
+ RRType.A, "192.0.2.1")
expected_mac = b"\x8f\xcd\xa6\x6a\x7c\xd1\xa3\xb9\x94\x8e\xb1\x86" + \
b"\x9d\x38\x4a\x9f"
@@ -280,7 +280,7 @@ class TSIGContextTest(unittest.TestCase):
zone_name = Name("example.com")
tsig = self.createMessageAndSign(axfr_qid, zone_name, self.tsig_ctx,
- 0, RRType.AXFR())
+ 0, RRType.AXFR)
received_data = read_wire_data("tsig_verify1.wire")
self.commonVerifyChecks(self.tsig_verify_ctx, tsig, received_data,
@@ -289,10 +289,10 @@ class TSIGContextTest(unittest.TestCase):
tsig = self.createMessageAndSign(axfr_qid, zone_name,
self.tsig_verify_ctx,
- AA_FLAG|QR_FLAG, RRType.AXFR(),
+ AA_FLAG|QR_FLAG, RRType.AXFR,
"ns.example.com. root.example.com." +\
" 2011041503 7200 3600 2592000 1200",
- RRType.SOA())
+ RRType.SOA)
received_data = read_wire_data("tsig_verify2.wire")
self.commonVerifyChecks(self.tsig_ctx, tsig, received_data,
@@ -302,8 +302,8 @@ class TSIGContextTest(unittest.TestCase):
b"\x60\x34\x13\x09\x68"
tsig = self.createMessageAndSign(axfr_qid, zone_name,
self.tsig_verify_ctx,
- AA_FLAG|QR_FLAG, RRType.AXFR(),
- "ns.example.com.", RRType.NS(),
+ AA_FLAG|QR_FLAG, RRType.AXFR,
+ "ns.example.com.", RRType.NS,
False)
self.commonSignChecks(tsig, axfr_qid, 0x4da8e951, expected_mac)
@@ -316,7 +316,7 @@ class TSIGContextTest(unittest.TestCase):
test_qid = 0x7fc4
tsig = self.createMessageAndSign(test_qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# "advance the clock" and try validating, which should fail due to
# BADTIME
@@ -328,8 +328,8 @@ class TSIGContextTest(unittest.TestCase):
# make and sign a response in the context of TSIG error.
tsig = self.createMessageAndSign(test_qid, self.test_name,
self.tsig_verify_ctx,
- QR_FLAG, RRType.SOA(), None, None,
- True, Rcode.NOTAUTH())
+ QR_FLAG, RRType.SOA, None, None,
+ True, Rcode.NOTAUTH)
expected_otherdata = b"\x00\x00\x4d\xa8\xbe\x86"
expected_mac = b"\xd4\xb0\x43\xf6\xf4\x44\x95\xec\x8a\x01\x26" +\
@@ -344,7 +344,7 @@ class TSIGContextTest(unittest.TestCase):
fix_current_time(0x4da8b9d6)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# "rewind the clock" and try validating, which should fail due to
# BADTIME
@@ -361,7 +361,7 @@ class TSIGContextTest(unittest.TestCase):
fix_current_time(0x4da8b9d6)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
fix_current_time(0x4da8b9d6 + 301)
self.assertEqual(TSIGError.BAD_TIME,
@@ -382,7 +382,7 @@ class TSIGContextTest(unittest.TestCase):
def test_badtime_overflow(self):
fix_current_time(200)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# This should be in the okay range, but since "200 - fudge" overflows
# and we compare them as 64-bit unsigned integers, it results in a
@@ -522,7 +522,7 @@ class TSIGContextTest(unittest.TestCase):
self.tsig_verify_ctx.get_state())
self.createMessageAndSign(self.qid, self.test_name,
self.tsig_verify_ctx,
- QR_FLAG|AA_FLAG|RD_FLAG, RRType.A(),
+ QR_FLAG|AA_FLAG|RD_FLAG, RRType.A,
"192.0.2.1")
self.assertEqual(TSIGContext.STATE_SENT_RESPONSE,
self.tsig_verify_ctx.get_state())
diff --git a/src/lib/dns/python/tests/tsigerror_python_test.py b/src/lib/dns/python/tests/tsigerror_python_test.py
index a968b6b..01860d3 100644
--- a/src/lib/dns/python/tests/tsigerror_python_test.py
+++ b/src/lib/dns/python/tests/tsigerror_python_test.py
@@ -28,7 +28,7 @@ class TSIGErrorTest(unittest.TestCase):
def test_from_rcode(self):
# We use RCODE for code values from 0-15.
- self.assertEqual(0, TSIGError(Rcode.NOERROR()).get_code())
+ self.assertEqual(0, TSIGError(Rcode.NOERROR).get_code())
self.assertEqual(15, TSIGError(Rcode(15)).get_code())
# From error code 16 TSIG errors define a separate space, so passing
@@ -50,19 +50,19 @@ class TSIGErrorTest(unittest.TestCase):
self.assertEqual(TSIGError.BAD_TIME_CODE, TSIGError.BAD_TIME.get_code())
def test_equal(self):
- self.assertTrue(TSIGError.NOERROR == TSIGError(Rcode.NOERROR()))
- self.assertTrue(TSIGError(Rcode.NOERROR()) == TSIGError.NOERROR)
+ self.assertTrue(TSIGError.NOERROR == TSIGError(Rcode.NOERROR))
+ self.assertTrue(TSIGError(Rcode.NOERROR) == TSIGError.NOERROR)
self.assertTrue(TSIGError.BAD_SIG == TSIGError(16))
self.assertTrue(TSIGError(16) == TSIGError.BAD_SIG)
def test_nequal(self):
- self.assertTrue(TSIGError.BAD_KEY != TSIGError(Rcode.NOERROR()))
- self.assertTrue(TSIGError(Rcode.NOERROR()) != TSIGError.BAD_KEY)
+ self.assertTrue(TSIGError.BAD_KEY != TSIGError(Rcode.NOERROR))
+ self.assertTrue(TSIGError(Rcode.NOERROR) != TSIGError.BAD_KEY)
def test_to_text(self):
# TSIGError derived from the standard Rcode
- self.assertEqual("NOERROR", TSIGError(Rcode.NOERROR()).to_text())
+ self.assertEqual("NOERROR", TSIGError(Rcode.NOERROR).to_text())
# Well known TSIG errors
self.assertEqual("BADSIG", TSIGError.BAD_SIG.to_text())
@@ -74,21 +74,21 @@ class TSIGErrorTest(unittest.TestCase):
self.assertEqual("65535", TSIGError(65535).to_text());
# also check str() works same way
- self.assertEqual("NOERROR", str(TSIGError(Rcode.NOERROR())))
+ self.assertEqual("NOERROR", str(TSIGError(Rcode.NOERROR)))
self.assertEqual("BADSIG", str(TSIGError.BAD_SIG))
def test_to_rcode(self):
# TSIGError derived from the standard Rcode
- self.assertEqual(Rcode.NOERROR(), TSIGError(Rcode.NOERROR()).to_rcode())
+ self.assertEqual(Rcode.NOERROR, TSIGError(Rcode.NOERROR).to_rcode())
# Well known TSIG errors
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_SIG.to_rcode())
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_KEY.to_rcode())
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_TIME.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_SIG.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_KEY.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_TIME.to_rcode())
# Unknown (or not yet supported) codes are treated as SERVFAIL.
- self.assertEqual(Rcode.SERVFAIL(), TSIGError(19).to_rcode())
- self.assertEqual(Rcode.SERVFAIL(), TSIGError(65535).to_rcode())
+ self.assertEqual(Rcode.SERVFAIL, TSIGError(19).to_rcode())
+ self.assertEqual(Rcode.SERVFAIL, TSIGError(65535).to_rcode())
# Check there's no redundant refcount (which would cause leak)
self.assertEqual(1, sys.getrefcount(TSIGError.BAD_SIG.to_rcode()))
diff --git a/src/lib/dns/python/tests/zone_checker_python_test.py b/src/lib/dns/python/tests/zone_checker_python_test.py
index 66b6c47..dc7d258 100644
--- a/src/lib/dns/python/tests/zone_checker_python_test.py
+++ b/src/lib/dns/python/tests/zone_checker_python_test.py
@@ -35,8 +35,8 @@ class ZoneCheckerTest(unittest.TestCase):
rrsets = RRsetCollection(b'example.org. 0 SOA . . 0 0 0 0 0\n' +
b'example.org. 0 NS ns.example.org.\n' +
b'ns.example.org. 0 A 192.0.2.1\n',
- Name('example.org'), RRClass.IN())
- self.assertTrue(check_zone(Name('example.org'), RRClass.IN(),
+ Name('example.org'), RRClass.IN)
+ self.assertTrue(check_zone(Name('example.org'), RRClass.IN,
rrsets,
(lambda r: self.__callback(r, errors),
lambda r: self.__callback(r, warns))))
@@ -45,8 +45,8 @@ class ZoneCheckerTest(unittest.TestCase):
# Check fails and one additional warning.
rrsets = RRsetCollection(b'example.org. 0 NS ns.example.org.',
- Name('example.org'), RRClass.IN())
- self.assertFalse(check_zone(Name('example.org'), RRClass.IN(), rrsets,
+ Name('example.org'), RRClass.IN)
+ self.assertFalse(check_zone(Name('example.org'), RRClass.IN, rrsets,
(lambda r: self.__callback(r, errors),
lambda r: self.__callback(r, warns))))
self.assertEqual(['zone example.org/IN: has 0 SOA records'], errors)
@@ -56,7 +56,7 @@ class ZoneCheckerTest(unittest.TestCase):
# Same RRset collection, suppressing callbacks
errors = []
warns = []
- self.assertFalse(check_zone(Name('example.org'), RRClass.IN(), rrsets,
+ self.assertFalse(check_zone(Name('example.org'), RRClass.IN, rrsets,
(None, None)))
self.assertEqual([], errors)
self.assertEqual([], warns)
@@ -64,29 +64,29 @@ class ZoneCheckerTest(unittest.TestCase):
def test_check_badarg(self):
rrsets = RRsetCollection()
# Bad types
- self.assertRaises(TypeError, check_zone, 1, RRClass.IN(), rrsets,
+ self.assertRaises(TypeError, check_zone, 1, RRClass.IN, rrsets,
(None, None))
self.assertRaises(TypeError, check_zone, Name('example'), 1, rrsets,
(None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
1, (None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, 1)
# Bad callbacks
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (1, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, 1))
# Extra/missing args
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None), 1)
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets)
- check_zone(Name('example'), RRClass.IN(), rrsets, (None, None))
+ check_zone(Name('example'), RRClass.IN, rrsets, (None, None))
def test_check_callback_fail(self):
# Let the call raise a Python exception. It should be propagated to
@@ -96,7 +96,7 @@ class ZoneCheckerTest(unittest.TestCase):
# Using an empty collection, triggering an error callback.
self.assertRaises(FakeException, check_zone, Name('example.org'),
- RRClass.IN(), RRsetCollection(),
+ RRClass.IN, RRsetCollection(),
(__bad_callback, None))
# An unusual case: the callback is expected to return None, but if it
@@ -108,7 +108,7 @@ class ZoneCheckerTest(unittest.TestCase):
ref_checker = RefChecker()
orig_refcnt = sys.getrefcount(ref_checker)
- check_zone(Name('example.org'), RRClass.IN(), RRsetCollection(),
+ check_zone(Name('example.org'), RRClass.IN, RRsetCollection(),
(lambda r: __callback(r, ref_checker), None))
self.assertEqual(orig_refcnt, sys.getrefcount(ref_checker))
@@ -132,48 +132,45 @@ class ZoneCheckerTest(unittest.TestCase):
raise FakeException('find error')
if self.__find_result is not 'use_default':
return self.__find_result
- if rrtype == RRType.SOA():
- soa = RRset(Name('example'), RRClass.IN(), rrtype,
- RRTTL(0))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ if rrtype == RRType.SOA:
+ soa = RRset(Name('example'), RRClass.IN, rrtype, RRTTL(0))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'. . 0 0 0 0 0'))
return soa
- if rrtype == RRType.NS():
- ns = RRset(Name('example'), RRClass.IN(), rrtype,
- RRTTL(0))
- ns.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
- 'example.org'))
+ if rrtype == RRType.NS:
+ ns = RRset(Name('example'), RRClass.IN, rrtype, RRTTL(0))
+ ns.add_rdata(Rdata(RRType.NS, RRClass.IN, 'example.org.'))
return ns
return None
# A successful case. Just checking it works in that case.
rrsets = FakeRRsetCollection()
- self.assertTrue(check_zone(Name('example'), RRClass.IN(), rrsets,
+ self.assertTrue(check_zone(Name('example'), RRClass.IN, rrsets,
(None, None)))
# Likewise, normal case but zone check fails.
rrsets = FakeRRsetCollection(False, None)
- self.assertFalse(check_zone(Name('example'), RRClass.IN(), rrsets,
+ self.assertFalse(check_zone(Name('example'), RRClass.IN, rrsets,
(None, None)))
# Our find() returns a bad type of result.
rrsets = FakeRRsetCollection(False, 1)
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None))
# Our find() returns an empty SOA RRset. C++ zone checker code
# throws, which results in IscException.
rrsets = FakeRRsetCollection(False, RRset(Name('example'),
- RRClass.IN(),
- RRType.SOA(), RRTTL(0)))
+ RRClass.IN,
+ RRType.SOA, RRTTL(0)))
self.assertRaises(IscException, check_zone, Name('example'),
- RRClass.IN(), rrsets, (None, None))
+ RRClass.IN, rrsets, (None, None))
# Our find() raises an exception. That exception is propagated to
# the top level.
rrsets = FakeRRsetCollection(True)
self.assertRaises(FakeException, check_zone, Name('example'),
- RRClass.IN(), rrsets, (None, None))
+ RRClass.IN, rrsets, (None, None))
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/rdata/generic/cname_5.cc b/src/lib/dns/rdata/generic/cname_5.cc
index 5bb0aea..e87eeec 100644
--- a/src/lib/dns/rdata/generic/cname_5.cc
+++ b/src/lib/dns/rdata/generic/cname_5.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid CNAME RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The CNAME must be absolute since there's no parameter that specifies
+/// the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. These must not be represented as a quoted
+/// string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
CNAME::CNAME(const std::string& namestr) :
- cname_(namestr)
-{}
+ // Fill in dummy name and replace it soon below.
+ cname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ cname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for CNAME: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct CNAME from '" <<
+ namestr << "': " << ex.what());
+ }
+}
CNAME::CNAME(InputBuffer& buffer, size_t) :
Rdata(), cname_(buffer)
@@ -39,6 +73,27 @@ CNAME::CNAME(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a CNAME RDATA. The CNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of CNAME when it
+/// is non-absolute.
+CNAME::CNAME(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ cname_(createNameFromLexer(lexer, origin))
+{}
+
CNAME::CNAME(const CNAME& other) :
Rdata(), cname_(other.cname_)
{}
diff --git a/src/lib/dns/rdata/generic/dname_39.cc b/src/lib/dns/rdata/generic/dname_39.cc
index a22fcc3..d1d349e 100644
--- a/src/lib/dns/rdata/generic/dname_39.cc
+++ b/src/lib/dns/rdata/generic/dname_39.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid DNAME RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The TARGET must be absolute since there's no parameter that specifies
+/// the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. These must not be represented as a quoted
+/// string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
DNAME::DNAME(const std::string& namestr) :
- dname_(namestr)
-{}
+ // Fill in dummy name and replace it soon below.
+ dname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ dname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for DNAME: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct DNAME from '" <<
+ namestr << "': " << ex.what());
+ }
+}
DNAME::DNAME(InputBuffer& buffer, size_t) :
dname_(buffer)
@@ -39,6 +73,27 @@ DNAME::DNAME(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a DNAME RDATA. The TARGET field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of TARGET when it
+/// is non-absolute.
+DNAME::DNAME(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ dname_(createNameFromLexer(lexer, origin))
+{}
+
DNAME::DNAME(const DNAME& other) :
Rdata(), dname_(other.dname_)
{}
diff --git a/src/lib/dns/rdata/generic/mx_15.cc b/src/lib/dns/rdata/generic/mx_15.cc
index b95ba05..12ada97 100644
--- a/src/lib/dns/rdata/generic/mx_15.cc
+++ b/src/lib/dns/rdata/generic/mx_15.cc
@@ -26,9 +26,12 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using boost::lexical_cast;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -40,21 +43,80 @@ MX::MX(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid MX RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The EXCHANGE name must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// See the construction that takes \c MasterLexer for other fields.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
MX::MX(const std::string& mx_str) :
- preference_(0), mxname_(".")
+ // Fill in dummy name and replace them soon below.
+ preference_(0), mxname_(Name::ROOT_NAME())
{
- istringstream iss(mx_str);
- uint16_t pref;
- string mxname;
-
- iss >> pref >> mxname;
+ try {
+ std::istringstream ss(mx_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ const uint32_t num =
+ lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid MX preference in: "
+ << mx_str);
+ }
+ preference_ = static_cast<uint16_t>(num);
+
+ mxname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for MX: "
+ << mx_str);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct MX from '" <<
+ mx_str << "': " << ex.what());
+ }
+}
- if (iss.bad() || iss.fail() || !iss.eof()) {
- isc_throw(InvalidRdataText, "Invalid MX text format");
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual representation
+/// of an MX RDATA. The EXCHANGE field can be non-absolute if \c origin
+/// is non-NULL, in which case \c origin is used to make it absolute.
+/// It must not be represented as a quoted string.
+///
+/// The PREFERENCE field must be a valid decimal representation of an
+/// unsigned 16-bit integer.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of EXCHANGE when it
+/// is non-absolute.
+MX::MX(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ preference_(0), mxname_(".")
+{
+ const uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid MX preference: " << num);
}
+ preference_ = static_cast<uint16_t>(num);
- preference_ = pref;
- mxname_ = Name(mxname);
+ mxname_ = createNameFromLexer(lexer, origin);
}
MX::MX(uint16_t preference, const Name& mxname) :
diff --git a/src/lib/dns/rdata/generic/ns_2.cc b/src/lib/dns/rdata/generic/ns_2.cc
index 631da9d..d75ab7d 100644
--- a/src/lib/dns/rdata/generic/ns_2.cc
+++ b/src/lib/dns/rdata/generic/ns_2.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid NS RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The NSDNAME must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c
+/// MissingNameOrigin exception will be thrown. These must not be
+/// represented as a quoted string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
NS::NS(const std::string& namestr) :
- nsname_(namestr)
-{}
+ // Fill in dummy name and replace them soon below.
+ nsname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ nsname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for NS: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct NS from '" <<
+ namestr << "': " << ex.what());
+ }
+}
NS::NS(InputBuffer& buffer, size_t) :
nsname_(buffer)
@@ -39,6 +73,27 @@ NS::NS(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of an NS RDATA. The NSDNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of NSDNAME when it
+/// is non-absolute.
+NS::NS(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ nsname_(createNameFromLexer(lexer, origin))
+{}
+
NS::NS(const NS& other) :
Rdata(), nsname_(other.nsname_)
{}
diff --git a/src/lib/dns/rdata/generic/ptr_12.cc b/src/lib/dns/rdata/generic/ptr_12.cc
index b76fc7f..080faee 100644
--- a/src/lib/dns/rdata/generic/ptr_12.cc
+++ b/src/lib/dns/rdata/generic/ptr_12.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid PTR RDATA. There can be
+/// extra space characters at the beginning or end of the text (which
+/// are simply ignored), but other extra text, including a new line,
+/// will make the construction fail with an exception.
+///
+/// The PTRDNAME must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c
+/// MissingNameOrigin exception will be thrown. These must not be
+/// represented as a quoted string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
PTR::PTR(const std::string& type_str) :
- ptr_name_(type_str)
-{}
+ // Fill in dummy name and replace them soon below.
+ ptr_name_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(type_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ ptr_name_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for PTR: "
+ << type_str);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct PTR from '" <<
+ type_str << "': " << ex.what());
+ }
+}
PTR::PTR(InputBuffer& buffer, size_t) :
ptr_name_(buffer)
@@ -39,6 +73,27 @@ PTR::PTR(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a PTR RDATA. The PTRDNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of PTRDNAME when it
+/// is non-absolute.
+PTR::PTR(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ ptr_name_(createNameFromLexer(lexer, origin))
+{}
+
PTR::PTR(const PTR& source) :
Rdata(), ptr_name_(source.ptr_name_)
{}
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
index af8bbe3..ac62071 100644
--- a/src/lib/dns/rdata/in_1/srv_33.cc
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -25,9 +25,12 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
using namespace isc::util::str;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -48,45 +51,57 @@ struct SRVImpl {
/// \brief Constructor from string.
///
-/// \c srv_str must be formatted as follows:
-/// \code <Priority> <Weight> <Port> <Target>
-/// \endcode
-/// where
-/// - <Priority>, <Weight>, and <Port> are an unsigned
-/// 16-bit decimal integer.
-/// - <Target> is a valid textual representation of domain name.
-///
-/// An example of valid string is:
-/// \code "1 5 1500 example.com." \endcode
-///
-/// <b>Exceptions</b>
-///
-/// If <Target> is not a valid domain name, a corresponding exception
-/// from the \c Name class will be thrown;
-/// if %any of the other bullet points above is not met, an exception of
-/// class \c InvalidRdataText will be thrown.
-/// This constructor internally involves resource allocation, and if it fails
-/// a corresponding standard exception will be thrown.
+/// The given string must represent a valid SRV RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The TARGET name must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// See the construction that takes \c MasterLexer for other fields.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
SRV::SRV(const std::string& srv_str) :
impl_(NULL)
{
- istringstream iss(srv_str);
-
try {
- const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
- const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
- const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
- const Name targetname(getToken(iss));
-
- if (!iss.eof()) {
- isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
- srv_str);
+ std::istringstream ss(srv_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV priority in: " << srv_str);
+ }
+ const uint16_t priority = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV weight in: " << srv_str);
+ }
+ const uint16_t weight = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV port in: " << srv_str);
+ }
+ const uint16_t port = static_cast<uint16_t>(num);
+
+ const Name targetname = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for SRV: "
+ << srv_str);
}
impl_ = new SRVImpl(priority, weight, port, targetname);
- } catch (const StringTokenError& ste) {
- isc_throw(InvalidRdataText, "Invalid SRV text: " <<
- ste.what() << ": " << srv_str);
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct SRV from '" <<
+ srv_str << "': " << ex.what());
}
}
@@ -112,14 +127,58 @@ SRV::SRV(InputBuffer& buffer, size_t rdata_len) {
isc_throw(InvalidRdataLength, "SRV too short");
}
- uint16_t priority = buffer.readUint16();
- uint16_t weight = buffer.readUint16();
- uint16_t port = buffer.readUint16();
+ const uint16_t priority = buffer.readUint16();
+ const uint16_t weight = buffer.readUint16();
+ const uint16_t port = buffer.readUint16();
const Name targetname(buffer);
impl_ = new SRVImpl(priority, weight, port, targetname);
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual representation
+/// of an SRV RDATA. The TARGET field can be non-absolute if \c origin
+/// is non-NULL, in which case \c origin is used to make it absolute.
+/// It must not be represented as a quoted string.
+///
+/// The PRIORITY, WEIGHT and PORT fields must each be a valid decimal
+/// representation of an unsigned 16-bit integers respectively.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+/// construction of textual fields as these objects fail.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of TARGET when it
+/// is non-absolute.
+SRV::SRV(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&)
+{
+ uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV priority: " << num);
+ }
+ const uint16_t priority = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV weight: " << num);
+ }
+ const uint16_t weight = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV port: " << num);
+ }
+ const uint16_t port = static_cast<uint16_t>(num);
+
+ const Name targetname = createNameFromLexer(lexer, origin);
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+}
+
/// \brief The copy constructor.
///
/// It internally allocates a resource, and if it fails a corresponding
diff --git a/src/lib/dns/rdata/template.h b/src/lib/dns/rdata/template.h
index 9e84cc3..3bfeb85 100644
--- a/src/lib/dns/rdata/template.h
+++ b/src/lib/dns/rdata/template.h
@@ -39,6 +39,10 @@
// Note: do not remove the comment lines beginning with "BEGIN_" and "END_".
// These are markers used by a script for auto-generating build-able source
// files.
+//
+// On completion of implementing a new type of Rdata, remove the corresponding
+// entry from the meta_types dictionary of gen-rdatacode.py.in. Otherwise
+// it will cause build failure.
class MyType : public Rdata {
public:
diff --git a/src/lib/dns/rrclass-placeholder.h b/src/lib/dns/rrclass-placeholder.h
index 1ff4163..89dc49d 100644
--- a/src/lib/dns/rrclass-placeholder.h
+++ b/src/lib/dns/rrclass-placeholder.h
@@ -294,28 +294,14 @@ public:
// BEGIN_WELL_KNOWN_CLASS_DECLARATIONS
// END_WELL_KNOWN_CLASS_DECLARATIONS
-
- static const RRClass& NONE();
private:
- // \brief Meta-classes
- enum {
- RRCLASS_RESERVED0 = 0,
- RRCLASS_NONE = 254
- };
uint16_t classcode_;
};
// BEGIN_WELL_KNOWN_CLASS_DEFINITIONS
// END_WELL_KNOWN_CLASS_DEFINITIONS
-inline const RRClass&
-RRClass::NONE() {
- static RRClass rrclass(RRCLASS_NONE);
-
- return (rrclass);
-}
-
///
/// \brief Insert the \c RRClass as a string into stream.
///
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 273a486..5541635 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -262,43 +262,13 @@ public:
// BEGIN_WELL_KNOWN_TYPE_DECLARATIONS
// END_WELL_KNOWN_TYPE_DECLARATIONS
- static const RRType& IXFR();
- static const RRType& AXFR();
- static const RRType& ANY();
-
private:
- // \brief Meta-classes
- // XXX: these should be implemented using rrparamregistry
- enum {
- RRTYPE_IXFR = 251,
- RRTYPE_AXFR = 252,
- RRTYPE_ANY = 255
- };
-
uint16_t typecode_;
};
// BEGIN_WELL_KNOWN_TYPE_DEFINITIONS
// END_WELL_KNOWN_TYPE_DEFINITIONS
-inline const RRType&
-RRType::IXFR() {
- static RRType rrtype(RRTYPE_IXFR);
- return (rrtype);
-}
-
-inline const RRType&
-RRType::AXFR() {
- static RRType rrtype(RRTYPE_AXFR);
- return (rrtype);
-}
-
-inline const RRType&
-RRType::ANY() {
- static RRType rrtype(RRTYPE_ANY);
- return (rrtype);
-}
-
///
/// \brief Insert the \c RRType as a string into stream.
///
diff --git a/src/lib/dns/tests/rdata_cname_unittest.cc b/src/lib/dns/tests/rdata_cname_unittest.cc
index 6451f72..5f602f0 100644
--- a/src/lib/dns/tests/rdata_cname_unittest.cc
+++ b/src/lib/dns/tests/rdata_cname_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_CNAME_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_CNAME_Test() :
+ rdata_cname("cn.example.com."),
+ rdata_cname2("cn2.example.com.")
+ {}
+
+ const generic::CNAME rdata_cname;
+ const generic::CNAME rdata_cname2;
};
-const generic::CNAME rdata_cname("cn.example.com");
-const generic::CNAME rdata_cname2("cn2.example.com");
const uint8_t wiredata_cname[] = {
0x02, 0x63, 0x6e, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -50,16 +55,21 @@ const uint8_t wiredata_cname2[] = {
0x03, 0x63, 0x6e, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_CNAME_Test, createFromText) {
- EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com")));
+ EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("CN.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("CN.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_cname.compare(*createRdata(RRType("CNAME"),
RRClass(65000),
- "cn.example.com")));
+ "cn.example.com.")));
+}
+
+TEST_F(Rdata_CNAME_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::CNAME("cname.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_CNAME_Test, createFromWire) {
@@ -79,7 +89,7 @@ TEST_F(Rdata_CNAME_Test, createFromWire) {
"rdata_cname_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::CNAME("cn2.example.com").compare(
+ EXPECT_EQ(0, generic::CNAME("cn2.example.com.").compare(
*rdataFactoryFromFile(RRType("CNAME"), RRClass("IN"),
"rdata_cname_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("CNAME"), RRClass("IN"),
@@ -90,7 +100,17 @@ TEST_F(Rdata_CNAME_Test, createFromWire) {
TEST_F(Rdata_CNAME_Test, createFromLexer) {
EXPECT_EQ(0, rdata_cname.compare(
*test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
- "cn.example.com")));
+ "cn.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::CNAME("cname10.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
+ "cname10")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
+ "cname.example.com. extra."));
}
TEST_F(Rdata_CNAME_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rdata_dname_unittest.cc b/src/lib/dns/tests/rdata_dname_unittest.cc
index c4e517c..7209e36 100644
--- a/src/lib/dns/tests/rdata_dname_unittest.cc
+++ b/src/lib/dns/tests/rdata_dname_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_DNAME_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_DNAME_Test() :
+ rdata_dname("dn.example.com."),
+ rdata_dname2("dn2.example.com.")
+ {}
+
+ const generic::DNAME rdata_dname;
+ const generic::DNAME rdata_dname2;
};
-const generic::DNAME rdata_dname("dn.example.com");
-const generic::DNAME rdata_dname2("dn2.example.com");
const uint8_t wiredata_dname[] = {
0x02, 0x64, 0x6e, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -52,16 +57,21 @@ const uint8_t wiredata_dname2[] = {
0x63, 0x6f, 0x6d, 0x00 };
TEST_F(Rdata_DNAME_Test, createFromText) {
- EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com")));
+ EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("DN.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("DN.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_dname.compare(*createRdata(RRType("DNAME"),
RRClass(65000),
- "dn.example.com")));
+ "dn.example.com.")));
+}
+
+TEST_F(Rdata_DNAME_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::DNAME("dname.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_DNAME_Test, createFromWire) {
@@ -81,7 +91,7 @@ TEST_F(Rdata_DNAME_Test, createFromWire) {
"rdata_dname_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::DNAME("dn2.example.com").compare(
+ EXPECT_EQ(0, generic::DNAME("dn2.example.com.").compare(
*rdataFactoryFromFile(RRType("DNAME"), RRClass("IN"),
"rdata_dname_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("DNAME"), RRClass("IN"),
@@ -92,7 +102,17 @@ TEST_F(Rdata_DNAME_Test, createFromWire) {
TEST_F(Rdata_DNAME_Test, createFromLexer) {
EXPECT_EQ(0, rdata_dname.compare(
*test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
- "dn.example.com")));
+ "dn.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::DNAME("dname8.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
+ "dname8")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
+ "dname.example.com. extra."));
}
TEST_F(Rdata_DNAME_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rdata_mx_unittest.cc b/src/lib/dns/tests/rdata_mx_unittest.cc
index 6c6039a..6e4eaba 100644
--- a/src/lib/dns/tests/rdata_mx_unittest.cc
+++ b/src/lib/dns/tests/rdata_mx_unittest.cc
@@ -32,13 +32,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_MX_Test : public RdataTest {
- // there's nothing to specialize
-};
+public:
+ Rdata_MX_Test() :
+ rdata_mx(10, Name("mx.example.com"))
+ {}
-const generic::MX rdata_mx(10, Name("mx.example.com"));
+ const generic::MX rdata_mx;
+};
TEST_F(Rdata_MX_Test, createFromText) {
- const generic::MX rdata_mx2("10 mx.example.com");
+ const generic::MX rdata_mx2("10 mx.example.com.");
EXPECT_EQ(0, rdata_mx2.compare(rdata_mx));
}
@@ -48,6 +51,12 @@ TEST_F(Rdata_MX_Test, badText) {
EXPECT_THROW(const generic::MX rdata_mx("SPOON"), InvalidRdataText);
EXPECT_THROW(const generic::MX rdata_mx("10 mx. example.com."),
InvalidRdataText);
+ // No origin and relative
+ EXPECT_THROW(const generic::MX rdata_mx("10 mx.example.com"),
+ MissingNameOrigin);
+ // Extra text at end of line
+ EXPECT_THROW(const generic::MX rdata_mx("10 mx.example.com. extra."),
+ InvalidRdataText);
}
TEST_F(Rdata_MX_Test, copy) {
@@ -65,11 +74,25 @@ TEST_F(Rdata_MX_Test, createFromWire) {
TEST_F(Rdata_MX_Test, createFromLexer) {
EXPECT_EQ(0, rdata_mx.compare(
*test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
- "10 mx.example.com")));
+ "10 mx.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::MX("10 mx2.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "10 mx2")));
// Exceptions cause NULL to be returned.
EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
- "10 mx. example.com"));
+ "10 mx. example.com."));
+
+ // 65536 is larger than maximum possible preference
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "65536 mx.example.com."));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "10 mx.example.com. extra."));
}
TEST_F(Rdata_MX_Test, toWireRenderer) {
diff --git a/src/lib/dns/tests/rdata_ns_unittest.cc b/src/lib/dns/tests/rdata_ns_unittest.cc
index d536393..53eb670 100644
--- a/src/lib/dns/tests/rdata_ns_unittest.cc
+++ b/src/lib/dns/tests/rdata_ns_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_NS_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_NS_Test() :
+ rdata_ns("ns.example.com."),
+ rdata_ns2("ns2.example.com.")
+ {}
+
+ const generic::NS rdata_ns;
+ const generic::NS rdata_ns2;
};
-const generic::NS rdata_ns("ns.example.com");
-const generic::NS rdata_ns2("ns2.example.com");
const uint8_t wiredata_ns[] = {
0x02, 0x6e, 0x73, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -50,15 +55,20 @@ const uint8_t wiredata_ns2[] = {
0x03, 0x6e, 0x73, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_NS_Test, createFromText) {
- EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com")));
+ EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_ns.compare(generic::NS("NS.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_ns.compare(generic::NS("NS.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_ns.compare(*createRdata(RRType("NS"), RRClass(65000),
- "ns.example.com")));
+ "ns.example.com.")));
+}
+
+TEST_F(Rdata_NS_Test, badText) {
+ // Extra input at end of line
+ EXPECT_THROW(generic::NS("ns.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_NS_Test, createFromWire) {
@@ -78,7 +88,7 @@ TEST_F(Rdata_NS_Test, createFromWire) {
"rdata_ns_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::NS("ns2.example.com").compare(
+ EXPECT_EQ(0, generic::NS("ns2.example.com.").compare(
*rdataFactoryFromFile(RRType("NS"), RRClass("IN"),
"rdata_ns_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("NS"), RRClass("IN"),
@@ -89,11 +99,21 @@ TEST_F(Rdata_NS_Test, createFromWire) {
TEST_F(Rdata_NS_Test, createFromLexer) {
EXPECT_EQ(0, rdata_ns.compare(
*test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
- "ns.example.com")));
+ "ns.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::NS("ns8.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
+ "ns8")));
// Exceptions cause NULL to be returned.
EXPECT_FALSE(test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
""));
+
+ // Extra input at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
+ "ns.example.com. extra."));
}
TEST_F(Rdata_NS_Test, toWireBuffer) {
@@ -119,13 +139,13 @@ TEST_F(Rdata_NS_Test, toText) {
}
TEST_F(Rdata_NS_Test, compare) {
- generic::NS small("a.example");
- generic::NS large("example");
+ generic::NS small("a.example.");
+ generic::NS large("example.");
EXPECT_TRUE(Name("a.example") > Name("example"));
EXPECT_GT(0, small.compare(large));
}
TEST_F(Rdata_NS_Test, getNSName) {
- EXPECT_EQ(Name("ns.example.com"), rdata_ns.getNSName());
+ EXPECT_EQ(Name("ns.example.com."), rdata_ns.getNSName());
}
}
diff --git a/src/lib/dns/tests/rdata_ptr_unittest.cc b/src/lib/dns/tests/rdata_ptr_unittest.cc
index 44b849a..5d6d37d 100644
--- a/src/lib/dns/tests/rdata_ptr_unittest.cc
+++ b/src/lib/dns/tests/rdata_ptr_unittest.cc
@@ -37,11 +37,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_PTR_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_PTR_Test() :
+ rdata_ptr("ns.example.com."),
+ rdata_ptr2("ns2.example.com.")
+ {}
+
+ const generic::PTR rdata_ptr;
+ const generic::PTR rdata_ptr2;
};
-const generic::PTR rdata_ptr("ns.example.com");
-const generic::PTR rdata_ptr2("ns2.example.com");
const uint8_t wiredata_ptr[] = {
0x02, 0x6e, 0x73, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -54,15 +59,20 @@ const uint8_t wiredata_ptr2[] = {
0x03, 0x6e, 0x73, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_PTR_Test, createFromText) {
- EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com")));
+ EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("NS.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("NS.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_ptr.compare(*createRdata(RRType("PTR"), RRClass(65000),
- "ns.example.com")));
+ "ns.example.com.")));
+}
+
+TEST_F(Rdata_PTR_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::PTR("foo.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_PTR_Test, createFromWire) {
@@ -82,7 +92,7 @@ TEST_F(Rdata_PTR_Test, createFromWire) {
"rdata_ns_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::PTR("ns2.example.com").compare(
+ EXPECT_EQ(0, generic::PTR("ns2.example.com.").compare(
*rdataFactoryFromFile(RRType("PTR"), RRClass("IN"),
"rdata_ns_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("PTR"), RRClass("IN"),
@@ -93,7 +103,17 @@ TEST_F(Rdata_PTR_Test, createFromWire) {
TEST_F(Rdata_PTR_Test, createFromLexer) {
EXPECT_EQ(0, rdata_ptr.compare(
*test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
- "ns.example.com")));
+ "ns.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::PTR("foo0.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
+ "foo0")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
+ "foo.example.com. extra."));
}
TEST_F(Rdata_PTR_Test, toWireBuffer) {
@@ -119,8 +139,8 @@ TEST_F(Rdata_PTR_Test, toText) {
}
TEST_F(Rdata_PTR_Test, compare) {
- generic::PTR small("a.example");
- generic::PTR large("example");
+ generic::PTR small("a.example.");
+ generic::PTR large("example.");
EXPECT_TRUE(Name("a.example") > Name("example"));
EXPECT_GT(0, small.compare(large));
}
diff --git a/src/lib/dns/tests/rdata_srv_unittest.cc b/src/lib/dns/tests/rdata_srv_unittest.cc
index 066755f..6ca0c7f 100644
--- a/src/lib/dns/tests/rdata_srv_unittest.cc
+++ b/src/lib/dns/tests/rdata_srv_unittest.cc
@@ -33,14 +33,23 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_SRV_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_SRV_Test() :
+ srv_txt("1 5 1500 a.example.com."),
+ srv_txt2("1 5 1400 example.com."),
+ too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123."),
+ rdata_srv(srv_txt),
+ rdata_srv2(srv_txt2)
+ {}
+
+ const string srv_txt;
+ const string srv_txt2;
+ const string too_long_label;
+ const in::SRV rdata_srv;
+ const in::SRV rdata_srv2;
};
-string srv_txt("1 5 1500 a.example.com.");
-string srv_txt2("1 5 1400 example.com.");
-string too_long_label("012345678901234567890123456789"
- "0123456789012345678901234567890123");
-
// 1 5 1500 a.example.com.
const uint8_t wiredata_srv[] = {
0x00, 0x01, 0x00, 0x05, 0x05, 0xdc, 0x01, 0x61, 0x07, 0x65, 0x78,
@@ -50,9 +59,6 @@ const uint8_t wiredata_srv2[] = {
0x00, 0x01, 0x00, 0x05, 0x05, 0x78, 0x07, 0x65, 0x78, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
-const in::SRV rdata_srv(srv_txt);
-const in::SRV rdata_srv2(srv_txt2);
-
TEST_F(Rdata_SRV_Test, createFromText) {
EXPECT_EQ(1, rdata_srv.getPriority());
EXPECT_EQ(5, rdata_srv.getWeight());
@@ -78,6 +84,8 @@ TEST_F(Rdata_SRV_Test, badText) {
// bad name
EXPECT_THROW(in::SRV("1 5 1500 a.example.com." + too_long_label),
TooLongLabel);
+ // Extra text at end of line
+ EXPECT_THROW(in::SRV("1 5 1500 a.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_SRV_Test, assignment) {
@@ -124,10 +132,29 @@ TEST_F(Rdata_SRV_Test, createFromLexer) {
*test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
"1 5 1500 a.example.com.")));
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, in::SRV("1 5 1500 server16.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 5 1500 server16")));
+
// Exceptions cause NULL to be returned.
+
+ // Bad priority
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "65536 5 1500 "
+ "a.example.com."));
+ // Bad weight
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 65536 1500 "
+ "a.example.com."));
+ // Bad port
EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
"1 5 281474976710656 "
"a.example.com."));
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 5 1500 a.example.com. extra."));
}
TEST_F(Rdata_SRV_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rrclass_unittest.cc b/src/lib/dns/tests/rrclass_unittest.cc
index 11f1c54..17af873 100644
--- a/src/lib/dns/tests/rrclass_unittest.cc
+++ b/src/lib/dns/tests/rrclass_unittest.cc
@@ -148,4 +148,29 @@ TEST_F(RRClassTest, LeftShiftOperator) {
oss << RRClass::IN();
EXPECT_EQ(RRClass::IN().toText(), oss.str());
}
+
+// Below, we'll check definitions for all well-known RR classes; whether they
+// are defined and have the correct parameter values. Test data are generated
+// from the list available at:
+// http://www.iana.org/assignments/dns-parameters/dns-parameters.xml
+struct ClassParam {
+ const char* const txt; // "IN", "CH", etc
+ const uint16_t code; // 1, 3,
+ const RRClass& (*obj)(); // RRClass::IN(), etc
+} known_classes[] = {
+ {"IN", 1, RRClass::IN}, {"CH", 3, RRClass::CH}, {"HS", 4, RRClass::HS},
+ {"NONE", 254, RRClass::NONE}, {"ANY", 255, RRClass::ANY},
+ {NULL, 0, NULL}
+};
+
+TEST(RRClassConstTest, wellKnowns) {
+ for (int i = 0; known_classes[i].txt; ++i) {
+ SCOPED_TRACE("Checking well known RRClass: " +
+ string(known_classes[i].txt));
+ EXPECT_EQ(known_classes[i].code,
+ RRClass(known_classes[i].txt).getCode());
+ EXPECT_EQ(known_classes[i].code,
+ (*known_classes[i].obj)().getCode());
+ }
+}
}
diff --git a/src/lib/dns/tests/rrset_unittest.cc b/src/lib/dns/tests/rrset_unittest.cc
index 725eea7..d16ce3c 100644
--- a/src/lib/dns/tests/rrset_unittest.cc
+++ b/src/lib/dns/tests/rrset_unittest.cc
@@ -168,7 +168,7 @@ TEST_F(RRsetTest, addRdataPtr) {
// Pointer version of addRdata() doesn't type check and does allow to
//add a different type of Rdata as a result.
rrset_a_empty.addRdata(createRdata(RRType::NS(), RRClass::IN(),
- "ns.example.com"));
+ "ns.example.com."));
EXPECT_EQ(3, rrset_a_empty.getRdataCount());
}
@@ -205,7 +205,7 @@ TEST_F(RRsetTest, toText) {
// Unless it is type ANY or NONE
EXPECT_EQ("test.example.com. 3600 ANY A\n",
rrset_any_a_empty.toText());
- EXPECT_EQ("test.example.com. 3600 CLASS254 A\n",
+ EXPECT_EQ("test.example.com. 3600 NONE A\n",
rrset_none_a_empty.toText());
}
diff --git a/src/lib/dns/tests/rrtype_unittest.cc b/src/lib/dns/tests/rrtype_unittest.cc
index 28ecee6..ee302a1 100644
--- a/src/lib/dns/tests/rrtype_unittest.cc
+++ b/src/lib/dns/tests/rrtype_unittest.cc
@@ -145,4 +145,57 @@ TEST_F(RRTypeTest, LeftShiftOperator) {
oss << RRType::A();
EXPECT_EQ(RRType::A().toText(), oss.str());
}
+
+// Below, we'll check definitions for all well-known RR types; whether they
+// are defined and have the correct parameter values. Test data are generated
+// from the list available at:
+// http://www.iana.org/assignments/dns-parameters/dns-parameters.xml
+struct TypeParam {
+ const char* const txt; // "A", "AAAA", "NS", etc
+ const uint16_t code; // 1, 28, 2, etc
+ const RRType& (*obj)(); // RRType::A(), etc
+} known_types[] = {
+ {"A", 1, RRType::A}, {"NS", 2, RRType::NS}, {"MD", 3, RRType::MD},
+ {"MF", 4, RRType::MF}, {"CNAME", 5, RRType::CNAME},
+ {"SOA", 6, RRType::SOA}, {"MB", 7, RRType::MB}, {"MG", 8, RRType::MG},
+ {"MR", 9, RRType::MR}, {"NULL", 10, RRType::Null},
+ {"WKS", 11, RRType::WKS}, {"PTR", 12, RRType::PTR},
+ {"HINFO", 13, RRType::HINFO}, {"MINFO", 14, RRType::MINFO},
+ {"MX", 15, RRType::MX}, {"TXT", 16, RRType::TXT}, {"RP", 17, RRType::RP},
+ {"AFSDB", 18, RRType::AFSDB}, {"X25", 19, RRType::X25},
+ {"ISDN", 20, RRType::ISDN}, {"RT", 21, RRType::RT},
+ {"NSAP", 22, RRType::NSAP}, {"NSAP-PTR", 23, RRType::NSAP_PTR},
+ {"SIG", 24, RRType::SIG}, {"KEY", 25, RRType::KEY},
+ {"PX", 26, RRType::PX}, {"GPOS", 27, RRType::GPOS},
+ {"AAAA", 28, RRType::AAAA}, {"LOC", 29, RRType::LOC},
+ {"NXT", 30, RRType::NXT}, {"SRV", 33, RRType::SRV},
+ {"NAPTR", 35, RRType::NAPTR}, {"KX", 36, RRType::KX},
+ {"CERT", 37, RRType::CERT}, {"A6", 38, RRType::A6},
+ {"DNAME", 39, RRType::DNAME}, {"OPT", 41, RRType::OPT},
+ {"APL", 42, RRType::APL}, {"DS", 43, RRType::DS},
+ {"SSHFP", 44, RRType::SSHFP}, {"IPSECKEY", 45, RRType::IPSECKEY},
+ {"RRSIG", 46, RRType::RRSIG}, {"NSEC", 47, RRType::NSEC},
+ {"DNSKEY", 48, RRType::DNSKEY}, {"DHCID", 49, RRType::DHCID},
+ {"NSEC3", 50, RRType::NSEC3}, {"NSEC3PARAM", 51, RRType::NSEC3PARAM},
+ {"TLSA", 52, RRType::TLSA}, {"HIP", 55, RRType::HIP},
+ {"SPF", 99, RRType::SPF}, {"UNSPEC", 103, RRType::UNSPEC},
+ {"NID", 104, RRType::NID}, {"L32", 105, RRType::L32},
+ {"L64", 106, RRType::L64}, {"LP", 107, RRType::LP},
+ {"TKEY", 249, RRType::TKEY}, {"TSIG", 250, RRType::TSIG},
+ {"IXFR", 251, RRType::IXFR}, {"AXFR", 252, RRType::AXFR},
+ {"MAILB", 253, RRType::MAILB}, {"MAILA", 254, RRType::MAILA},
+ {"ANY", 255, RRType::ANY}, {"URI", 256, RRType::URI},
+ {"CAA", 257, RRType::CAA}, {"DLV", 32769, RRType::DLV},
+ {NULL, 0, NULL}
+};
+
+TEST(RRTypeConstTest, wellKnowns) {
+ for (int i = 0; known_types[i].txt; ++i) {
+ SCOPED_TRACE("Checking well known RRType: " +
+ string(known_types[i].txt));
+ EXPECT_EQ(known_types[i].code, RRType(known_types[i].txt).getCode());
+ EXPECT_EQ(known_types[i].code,
+ (*known_types[i].obj)().getCode());
+ }
+}
}
diff --git a/src/lib/dns/tests/zone_checker_unittest.cc b/src/lib/dns/tests/zone_checker_unittest.cc
index dbe204d..320cda6 100644
--- a/src/lib/dns/tests/zone_checker_unittest.cc
+++ b/src/lib/dns/tests/zone_checker_unittest.cc
@@ -160,7 +160,7 @@ TEST_F(ZoneCheckerTest, checkSOA) {
// Likewise, if the SOA RRset contains non SOA Rdata, it should be a bug.
rrsets_->removeRRset(zname_, zclass_, RRType::SOA());
soa_.reset(new RRset(zname_, zclass_, RRType::SOA(), RRTTL(60)));
- soa_->addRdata(createRdata(RRType::NS(), zclass_, "ns.example.com"));
+ soa_->addRdata(createRdata(RRType::NS(), zclass_, "ns.example.com."));
rrsets_->addRRset(soa_);
EXPECT_THROW(checkZone(zname_, zclass_, *rrsets_, callbacks_), Unexpected);
checkIssues(); // no error/warning should be reported
@@ -218,7 +218,7 @@ TEST_F(ZoneCheckerTest, checkNSData) {
// If there's a CNAME at the name instead, it's an error.
rrsets_->removeRRset(Name("*.example.com"), zclass_, RRType::A());
RRsetPtr cname(new RRset(ns_name, zclass_, RRType::CNAME(), RRTTL(60)));
- cname->addRdata(generic::CNAME("cname.example.com"));
+ cname->addRdata(generic::CNAME("cname.example.com."));
rrsets_->addRRset(cname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.example.com' is "
@@ -245,7 +245,7 @@ TEST_F(ZoneCheckerTest, checkNSData) {
rrsets_->removeRRset(ns_name, zclass_, RRType::CNAME());
rrsets_->removeRRset(zname_, zclass_, RRType::NS());
ns_.reset(new RRset(zname_, zclass_, RRType::NS(), RRTTL(60)));
- ns_->addRdata(generic::NS("ns.example.org"));
+ ns_->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(ns_);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -274,7 +274,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
rrsets_->addRRset(ns_);
RRsetPtr child_ns(new RRset(Name("child.example.com"), zclass_,
RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -282,7 +282,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
// Zone cut at the NS name. Same result.
rrsets_->removeRRset(child_ns->getName(), zclass_, RRType::NS());
child_ns.reset(new RRset(ns_name, zclass_, RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -291,7 +291,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
rrsets_->removeRRset(child_ns->getName(), zclass_, RRType::NS());
child_ns.reset(new RRset(Name("another.ns.child.example.com"), zclass_,
RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_warns_.push_back("zone example.com/IN: NS has no address");
@@ -309,7 +309,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
ns_->addRdata(generic::NS(ns_name));
rrsets_->addRRset(ns_);
RRsetPtr dname(new RRset(zname_, zclass_, RRType::DNAME(), RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.child.example.com'"
@@ -320,7 +320,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
rrsets_->removeRRset(zname_, zclass_, RRType::DNAME());
dname.reset(new RRset(Name("child.example.com"), zclass_, RRType::DNAME(),
RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.child.example.com'"
@@ -332,7 +332,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
// this implementation prefers the NS and skips further checks.
ns_.reset(new RRset(Name("child.example.com"), zclass_, RRType::NS(),
RRTTL(60)));
- ns_->addRdata(generic::NS("ns.example.org"));
+ ns_->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(ns_);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -342,7 +342,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
rrsets_->removeRRset(dname->getName(), zclass_, RRType::DNAME());
rrsets_->removeRRset(ns_->getName(), zclass_, RRType::NS());
dname.reset(new RRset(ns_name, zclass_, RRType::DNAME(), RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_warns_.push_back("zone example.com/IN: NS has no address");
diff --git a/src/lib/nsas/tests/nameserver_address_store_unittest.cc b/src/lib/nsas/tests/nameserver_address_store_unittest.cc
index 6ddae72..ceb5775 100644
--- a/src/lib/nsas/tests/nameserver_address_store_unittest.cc
+++ b/src/lib/nsas/tests/nameserver_address_store_unittest.cc
@@ -386,7 +386,7 @@ TEST_F(NameserverAddressStoreTest, CombinedTest) {
// But we do not answer it right away. We create a new zone and
// let this nameserver entry get out.
- rrns_->addRdata(rdata::generic::NS("example.cz"));
+ rrns_->addRdata(rdata::generic::NS("example.cz."));
nsas.lookupAndAnswer(EXAMPLE_CO_UK, RRClass::IN(), rrns_, getCallback());
// It really should ask something, one of the nameservers
diff --git a/src/lib/nsas/tests/nsas_test.h b/src/lib/nsas/tests/nsas_test.h
index d6b4d92..9f92149 100644
--- a/src/lib/nsas/tests/nsas_test.h
+++ b/src/lib/nsas/tests/nsas_test.h
@@ -264,8 +264,8 @@ protected:
rrch_->addRdata(ConstRdataPtr(new RdataTest<A>("1324")));
// NS records take a single name
- rrns_->addRdata(rdata::generic::NS("example.fr"));
- rrns_->addRdata(rdata::generic::NS("example.de"));
+ rrns_->addRdata(rdata::generic::NS("example.fr."));
+ rrns_->addRdata(rdata::generic::NS("example.de."));
// Single NS record with 0 TTL
rr_single_->addRdata(rdata::generic::NS(ns_name_));
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 6db64e2..9cd8d66 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -43,8 +43,8 @@ def reload():
# the system.
# PLUGIN_PATHS: configuration modules that are not associated to specific
# process
- # LIBEXECPATH: Paths to programs invoked by the boss process
- # The boss process (directly or via a helper module) uses this as
+ # LIBEXECPATH: Paths to programs invoked by the b10-init process
+ # The b10-init process (directly or via a helper module) uses this as
# the prefererred PATH before starting a child process.
# When "FROM_SOURCE", it lists the directories where the programs are
# built so that when BIND 10 is experimentally started on the source
@@ -53,7 +53,7 @@ def reload():
# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
# tests where we want to use various types of configuration within the test
# environment. (We may want to make it even more generic so that the path
- # is passed from the boss process)
+ # is passed from the b10-init process)
if "B10_FROM_SOURCE" in os.environ:
if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 029f110..37138a2 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,7 +1,3 @@
-# On some systems, it appears the dynamic linker gets
-# confused if the order is not right here
-# There is probably a solution for this, but for now:
-# order is important here!
-import isc.cc
-import isc.config
-import isc.datasrc
+"""
+This is the top directory for common BIND 10 Python modules and packages.
+"""
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index aa5d0ab..8d2f179 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
socket_cache.py
pythondir = $(pyexecdir)/isc/bind10
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index febeb10..2efb376 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -28,12 +28,12 @@ configuration). This is yet to be designed.
"""
import isc.log
-from isc.log_messages.bind10_messages import *
+from isc.log_messages.init_messages import *
import time
import os
import signal
-logger = isc.log.Logger("boss")
+logger = isc.log.Logger("init")
DBG_TRACE_DATA = 20
DBG_TRACE_DETAILED = 80
@@ -96,13 +96,13 @@ class BaseComponent:
that is already shutting down, impossible to stop, etc. We need to add more
states in future to handle it properly.
"""
- def __init__(self, boss, kind):
+ def __init__(self, b10_init, kind):
"""
Creates the component in not running mode.
The parameters are:
- - `boss` the boss object to plug into. The component needs to plug
- into it to know when it failed, etc.
+ - `b10_init` the b10_init object to plug into. The component needs
+ to plug into it to know when it failed, etc.
- `kind` is the kind of component. It may be one of:
* 'core' means the system can't run without it and it can't be
safely restarted. If it does not start, the system is brought
@@ -127,7 +127,7 @@ class BaseComponent:
Note that the __init__ method of child class should have these
parameters:
- __init__(self, process, boss, kind, address=None, params=None)
+ __init__(self, process, b10_init, kind, address=None, params=None)
The extra parameters are:
- `process` - which program should be started.
@@ -153,7 +153,7 @@ class BaseComponent:
raise ValueError('Component kind can not be ' + kind)
self.__state = STATE_STOPPED
self._kind = kind
- self._boss = boss
+ self._b10_init = b10_init
self._original_start_time = None
def start(self):
@@ -204,13 +204,14 @@ class BaseComponent:
def failed(self, exit_code):
"""
- Notify the component it crashed. This will be called from boss object.
+ Notify the component it crashed. This will be called from b10_init
+ object.
If you try to call failed on a component that is not running,
a ValueError is raised.
If it is a core component or needed component and it was started only
- recently, the component will become dead and will ask the boss to shut
+ recently, the component will become dead and will ask b10_init to shut
down with error exit status. A dead component can't be started again.
Otherwise the component will try to restart.
@@ -253,7 +254,7 @@ class BaseComponent:
self._original_start_time):
self.__state = STATE_DEAD
logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
- self._boss.component_shutdown(1)
+ self._b10_init.component_shutdown(1)
return False
# This means we want to restart
else:
@@ -326,7 +327,7 @@ class BaseComponent:
should be registered).
You should register all the processes created by calling
- self._boss.register_process.
+ self._b10_init.register_process.
"""
pass
@@ -407,15 +408,15 @@ class Component(BaseComponent):
directly. It is not recommended to override methods of this class
on one-by-one basis.
"""
- def __init__(self, process, boss, kind, address=None, params=None,
+ def __init__(self, process, b10_init, kind, address=None, params=None,
start_func=None):
"""
Creates the component in not running mode.
The parameters are:
- `process` is the name of the process to start.
- - `boss` the boss object to plug into. The component needs to plug
- into it to know when it failed, etc.
+ - `b10_init` the b10-init object to plug into. The component needs to
+ plug into it to know when it failed, etc.
- `kind` is the kind of component. Refer to the documentation of
BaseComponent for details.
- `address` is the address on message bus. It is used to ask it to
@@ -429,7 +430,7 @@ class Component(BaseComponent):
There's a sensible default if not provided, which just launches
the program without any special care.
"""
- BaseComponent.__init__(self, boss, kind)
+ BaseComponent.__init__(self, b10_init, kind)
self._process = process
self._start_func = start_func
self._address = address
@@ -443,25 +444,26 @@ class Component(BaseComponent):
process and return the procinfo object describing the running process.
If you don't provide the _start_func, the usual startup by calling
- boss.start_simple is performed.
+ b10_init.start_simple is performed.
"""
# This one is not tested. For one, it starts a real process
# which is out of scope of unit tests, for another, it just
- # delegates the starting to other function in boss (if a derived
+ # delegates the starting to other function in b10_init (if a derived
# class does not provide an override function), which is tested
# by use.
if self._start_func is not None:
procinfo = self._start_func()
else:
# TODO Handle params, etc
- procinfo = self._boss.start_simple(self._process)
+ procinfo = self._b10_init.start_simple(self._process)
self._procinfo = procinfo
- self._boss.register_process(self.pid(), self)
+ self._b10_init.register_process(self.pid(), self)
def _stop_internal(self):
- self._boss.stop_process(self._process, self._address, self.pid())
+ self._b10_init.stop_process(self._process, self._address, self.pid())
# TODO Some way to wait for the process that doesn't want to
- # terminate and kill it would prove nice (or add it to boss somewhere?)
+ # terminate and kill it would prove nice (or add it to b10_init
+ # somewhere?)
def name(self):
"""
@@ -498,7 +500,7 @@ class Configurator:
b10-auth as core, it is safe to stop that one.
The parameters are:
- * `boss`: The boss we are managing for.
+ * `b10_init`: The b10-init we are managing for.
* `specials`: Dict of specially started components. Each item is a class
representing the component.
@@ -527,13 +529,14 @@ class Configurator:
priority are started before the ones with lower priority. If it is
not present, it defaults to 0.
"""
- def __init__(self, boss, specials = {}):
+ def __init__(self, b10_init, specials = {}):
"""
Initializes the configurator, but nothing is started yet.
- The boss parameter is the boss object used to start and stop processes.
+ The b10_init parameter is the b10-init object used to start and stop
+ processes.
"""
- self.__boss = boss
+ self.__b10_init = b10_init
# These could be __private, but as we access them from within unittest,
# it's more comfortable to have them just _protected.
@@ -551,7 +554,7 @@ class Configurator:
def startup(self, configuration):
"""
Starts the first set of processes. This configuration is expected
- to be hardcoded from the boss itself to start the configuration
+ to be hardcoded from the b10-init itself to start the configuration
manager and other similar things.
"""
if self._running:
@@ -642,7 +645,7 @@ class Configurator:
# TODO: Better error handling
creator = self.__specials[component_config['special']]
component = creator(component_config.get('process', cname),
- self.__boss, component_config['kind'],
+ self.__b10_init, component_config['kind'],
component_config.get('address'),
component_config.get('params'))
priority = component_config.get('priority', 0)
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 593d1a6..db9e6c5 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -20,10 +20,10 @@ import errno
import copy
import subprocess
import copy
-from isc.log_messages.bind10_messages import *
+from isc.log_messages.init_messages import *
from libutil_io_python import recv_fd
-logger = isc.log.Logger("boss")
+logger = isc.log.Logger("init")
"""
Module that comunicates with the privileged socket creator (b10-sockcreator).
@@ -251,7 +251,7 @@ class Creator(Parser):
"""Function used before running a program that needs to run as a
different user."""
# Put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everthing down by
+ # SIGINT signals on Ctrl-C (b10-init will shut everything down by
# other means).
os.setpgrp()
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
index d6c1175..1c5199c 100644
--- a/src/lib/python/isc/bind10/socket_cache.py
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -106,7 +106,8 @@ class Cache:
This is the cache for sockets from socket creator. The purpose of cache
is to hold the sockets that were requested, until they are no longer
needed. One reason is, the socket is created before it is sent over the
- unix domain socket in boss, so we need to keep it somewhere for a while.
+ unix domain socket in b10-init, so we need to keep it somewhere for a
+ while.
The other reason is, a single socket might be requested multiple times.
So we keep it here in case someone else might ask for it.
@@ -114,7 +115,7 @@ class Cache:
Each socket kept here has a reference count and when it drops to zero,
it is removed from cache and closed.
- This is expected to be part of Boss, it is not a general utility class.
+ This is expected to be part of Init, it is not a general utility class.
It is not expected to be subclassed. The methods and members are named
as protected so tests are easier access into them.
@@ -175,7 +176,7 @@ class Cache:
restrictions and of all copies of socket handed out are considered,
so it can be raised even if you call it with share_mode 'ANY').
- isc.bind10.sockcreator.CreatorError: fatal creator errors are
- propagated. Thay should cause the boss to exit if ever encountered.
+ propagated. They should cause b10-init to exit if ever encountered.
Note that it isn't guaranteed the tokens would be unique and they
should be used as an opaque handle only.
@@ -220,11 +221,11 @@ class Cache:
one returned from previous call from get_token. The token can be used
only once to receive the socket.
- The application is a token representing the application that requested
- it. Currently, boss uses the file descriptor of connection from the
- application, but anything which can be a key in a dict is OK from the
- cache's point of view. You just need to use the same thing in
- drop_application.
+ The application is a token representing the application that
+ requested it. Currently, b10-init uses the file descriptor of
+ connection from the application, but anything which can be a key in
+ a dict is OK from the cache's point of view. You just need to use
+ the same thing in drop_application.
In case the token is considered invalid (it doesn't come from the
get_token, it was already used, the socket wasn't picked up soon
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index dcd9b64..3196795 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -26,23 +26,23 @@ class SockCreator(BaseComponent):
Note: _creator shouldn't be reset explicitly once created. The
underlying Popen object would then wait() the child process internally,
- which breaks the assumption of the boss, who is expecting to see
+ which breaks the assumption of b10-init, who is expecting to see
the process die in waitpid().
"""
- def __init__(self, process, boss, kind, address=None, params=None):
- BaseComponent.__init__(self, boss, kind)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ BaseComponent.__init__(self, b10_init, kind)
self.__creator = None
def _start_internal(self):
- self._boss.curproc = 'b10-sockcreator'
+ self._b10_init.curproc = 'b10-sockcreator'
self.__creator = isc.bind10.sockcreator.Creator(LIBEXECPATH + ':' +
os.environ['PATH'])
- self._boss.register_process(self.pid(), self)
- self._boss.set_creator(self.__creator)
- self._boss.log_started(self.pid())
+ self._b10_init.register_process(self.pid(), self)
+ self._b10_init.set_creator(self.__creator)
+ self._b10_init.log_started(self.pid())
# We are now ready for switching user.
- self._boss.change_user()
+ self._b10_init.change_user()
def _stop_internal(self):
self.__creator.terminate()
@@ -64,12 +64,12 @@ class SockCreator(BaseComponent):
class Msgq(Component):
"""
- The message queue. Starting is passed to boss, stopping is not supported
- and we leave the boss kill it by signal.
+ The message queue. Starting is passed to b10-init, stopping is not
+ supported and we leave b10-init kill it by signal.
"""
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, None, None,
- boss.start_msgq)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, None, None,
+ b10_init.start_msgq)
def _stop_internal(self):
"""
@@ -78,7 +78,7 @@ class Msgq(Component):
But as it is stateless, it's OK to kill it.
So we disable this method (as the only time it could be called is
- during shutdown) and wait for the boss to kill it in the next shutdown
+ during shutdown) and wait for b10-init to kill it in the next shutdown
step.
This actually breaks the recommendation at Component we shouldn't
@@ -89,24 +89,24 @@ class Msgq(Component):
pass
class CfgMgr(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'ConfigManager',
- None, boss.start_cfgmgr)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'ConfigManager',
+ None, b10_init.start_cfgmgr)
class Auth(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Auth', None,
- boss.start_auth)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Auth', None,
+ b10_init.start_auth)
class Resolver(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Resolver', None,
- boss.start_resolver)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Resolver', None,
+ b10_init.start_resolver)
class CmdCtl(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Cmdctl', None,
- boss.start_cmdctl)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Cmdctl', None,
+ b10_init.start_cmdctl)
def get_specials():
"""
List of specially started components. Each one should be the class than can
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 8603201..adc035e 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -31,9 +31,9 @@ class TestError(Exception):
"""
pass
-class BossUtils:
+class InitUtils:
"""
- A class that brings some utilities for pretending we're Boss.
+ A class that brings some utilities for pretending we're Init.
This is expected to be inherited by the testcases themselves.
"""
def setUp(self):
@@ -70,7 +70,7 @@ class BossUtils:
isc.bind10.component.time.time = lambda: tm + 30
# Few functions that pretend to start something. Part of pretending of
- # being boss.
+ # being b10-init.
def start_msgq(self):
pass
@@ -86,7 +86,7 @@ class BossUtils:
def start_cmdctl(self):
pass
-class ComponentTests(BossUtils, unittest.TestCase):
+class ComponentTests(InitUtils, unittest.TestCase):
"""
Tests for the bind10.component.Component class
"""
@@ -94,7 +94,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
"""
Pretend a newly started system.
"""
- BossUtils.setUp(self)
+ InitUtils.setUp(self)
self._shutdown = False
self._exitcode = None
self.__start_called = False
@@ -103,7 +103,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
self.__registered_processes = {}
self.__stop_process_params = None
self.__start_simple_params = None
- # Pretending to be boss
+ # Pretending to be b10-init
self.__change_user_called = False
def change_user(self):
@@ -149,7 +149,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
its behaviour.
The process used is some nonsense, as this isn't used in this
- kind of tests and we pretend to be the boss.
+ kind of tests and we pretend to be the b10-init.
"""
component = Component('No process', self, kind, 'homeless', [])
component._start_internal = self.__start
@@ -176,7 +176,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
Test the correct data are stored inside the component.
"""
component = self.__create_component('core')
- self.assertEqual(self, component._boss)
+ self.assertEqual(self, component._b10_init)
self.assertEqual("No process", component._process)
self.assertEqual(None, component._start_func)
self.assertEqual("homeless", component._address)
@@ -539,7 +539,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
def register_process(self, pid, process):
"""
- Part of pretending to be a boss
+ Part of pretending to be a b10-init
"""
self.__registered_processes[pid] = process
@@ -570,13 +570,13 @@ class ComponentTests(BossUtils, unittest.TestCase):
def stop_process(self, process, address, pid):
"""
- Part of pretending to be boss.
+ Part of pretending to be b10-init.
"""
self.__stop_process_params = (process, address, pid)
def start_simple(self, process):
"""
- Part of pretending to be boss.
+ Part of pretending to be b10-init.
"""
self.__start_simple_params = process
@@ -632,14 +632,14 @@ class ComponentTests(BossUtils, unittest.TestCase):
def set_creator(self, creator):
"""
- Part of faking being the boss. Check the creator (faked as well)
+ Part of faking being the b10-init. Check the creator (faked as well)
is passed here.
"""
self.assertTrue(isinstance(creator, self.FakeCreator))
def log_started(self, pid):
"""
- Part of faking the boss. Check the pid is the one of the fake creator.
+ Part of faking the b10-init. Check the pid is the one of the fake creator.
"""
self.assertEqual(42, pid)
@@ -706,13 +706,13 @@ class FailComponent(BaseComponent):
"""
A mock component that fails whenever it is started.
"""
- def __init__(self, name, boss, kind, address=None, params=None):
- BaseComponent.__init__(self, boss, kind)
+ def __init__(self, name, b10_init, kind, address=None, params=None):
+ BaseComponent.__init__(self, b10_init, kind)
def _start_internal(self):
raise TestError("test error")
-class ConfiguratorTest(BossUtils, unittest.TestCase):
+class ConfiguratorTest(InitUtils, unittest.TestCase):
"""
Tests for the configurator.
"""
@@ -720,7 +720,7 @@ class ConfiguratorTest(BossUtils, unittest.TestCase):
"""
Prepare some test data for the tests.
"""
- BossUtils.setUp(self)
+ InitUtils.setUp(self)
self.log = []
# The core "hardcoded" configuration
self.__core = {
@@ -755,11 +755,12 @@ class ConfiguratorTest(BossUtils, unittest.TestCase):
self.__core_log = self.__core_log_create + self.__core_log_start
self.__specials = { 'test': self.__component_test }
- def __component_test(self, process, boss, kind, address=None, params=None):
+ def __component_test(self, process, b10_init, kind, address=None,
+ params=None):
"""
Create a test component. It will log events to us.
"""
- self.assertEqual(self, boss)
+ self.assertEqual(self, b10_init)
return TestComponent(self, process, kind, address, params)
def test_init(self):
diff --git a/src/lib/python/isc/cc/Makefile.am b/src/lib/python/isc/cc/Makefile.am
index b0ba3b2..ba6fe50 100644
--- a/src/lib/python/isc/cc/Makefile.am
+++ b/src/lib/python/isc/cc/Makefile.am
@@ -1,6 +1,19 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py data.py session.py message.py
+python_PYTHON = __init__.py data.py session.py message.py logger.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.pyc
+
+EXTRA_DIST = pycc_messages.mes
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py: pycc_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/pycc_messages.mes
pythondir = $(pyexecdir)/isc/cc
diff --git a/src/lib/python/isc/cc/logger.py b/src/lib/python/isc/cc/logger.py
new file mode 100644
index 0000000..5fd440f
--- /dev/null
+++ b/src/lib/python/isc/cc/logger.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2013 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+""" This is a logging utility module for other modules of the cc library
+package.
+
+"""
+
+import isc.log
+
+# C++ version of the CC module uses 'cc'; using the same name does not
+# necessarily cause disruption, but we use a different name to minimize
+# possible confusion.
+logger = isc.log.Logger('pycc')
diff --git a/src/lib/python/isc/cc/pycc_messages.mes b/src/lib/python/isc/cc/pycc_messages.mes
new file mode 100644
index 0000000..997b899
--- /dev/null
+++ b/src/lib/python/isc/cc/pycc_messages.mes
@@ -0,0 +1,20 @@
+# Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the pycc_messages python module.
+
+% PYCC_LNAME_RECEIVED received local name: %1
+Debug message: the local module received its unique identifier (name)
+from msgq on completion of establishing the session with msgq.
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index 33a47bd..caac553 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -22,6 +22,9 @@ import threading
import bind10_config
import isc.cc.message
+import isc.log
+from isc.cc.logger import logger
+from isc.log_messages.pycc_messages import *
class ProtocolError(Exception): pass
class NetworkError(Exception): pass
@@ -60,6 +63,8 @@ class Session:
self._lname = msg["lname"]
if not self._lname:
raise ProtocolError("Could not get local name")
+ logger.debug(logger.DBGLVL_TRACE_BASIC, PYCC_LNAME_RECEIVED,
+ self._lname)
except socket.error as se:
raise SessionError(se)
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 9563cab..bc24cbb 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -68,6 +68,62 @@ class ConfigManagerData:
self.db_filename = data_path + os.sep + file_name
self.data_path = data_path
+ def check_for_updates(file_config):
+ """
+ Given the parsed JSON data from the config file,
+ check whether it needs updating due to version changes.
+ Return the data with updates (or the original data if no
+ updates were necessary).
+ Even though it is at this moment not technically necessary, this
+ function makes and returns a copy of the given data.
+ """
+ config = copy.deepcopy(file_config)
+ if 'version' in config:
+ data_version = config['version']
+ else:
+ # If it is not present, assume latest or earliest?
+ data_version = 1
+
+ # For efficiency, if up-to-date, return now
+ if data_version == config_data.BIND10_CONFIG_DATA_VERSION:
+ return config
+
+ # Don't know what to do if it is more recent
+ if data_version > config_data.BIND10_CONFIG_DATA_VERSION:
+ raise ConfigManagerDataReadError(
+ "Cannot load configuration file: version "
+ "%d not yet supported" % config['version'])
+
+ # At some point we might give up supporting older versions
+ if data_version < 1:
+ raise ConfigManagerDataReadError(
+ "Cannot load configuration file: version "
+ "%d no longer supported" % config['version'])
+
+ # Ok, so we have a still-supported older version. Apply all
+ # updates
+ new_data_version = data_version
+ if new_data_version == 1:
+ # only format change, no other changes necessary
+ new_data_version = 2
+ if new_data_version == 2:
+ # 'Boss' got changed to 'Init'; If for some reason both are
+ # present, simply ignore the old one
+ if 'Boss' in config:
+ if not 'Init' in config:
+ config['Init'] = config['Boss']
+ del config['Boss']
+ else:
+ # This should not happen, but we don't want to overwrite
+ # any config in this case, so warn about it
+ logger.warn(CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND)
+ new_data_version = 3
+
+ config['version'] = new_data_version
+ logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, data_version,
+ new_data_version)
+ return config
+
def read_from_file(data_path, file_name):
"""Read the current configuration found in the file file_name.
If file_name is absolute, data_path is ignored. Otherwise
@@ -90,21 +146,7 @@ class ConfigManagerData:
# If possible, we automatically convert to the new
# scheme and update the configuration
# If not, we raise an exception
- if 'version' in file_config:
- if file_config['version'] == config_data.BIND10_CONFIG_DATA_VERSION:
- config.data = file_config
- elif file_config['version'] == 1:
- # only format change, no other changes necessary
- file_config['version'] = 2
- logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, 1, 2)
- config.data = file_config
- else:
- if config_data.BIND10_CONFIG_DATA_VERSION > file_config['version']:
- raise ConfigManagerDataReadError("Cannot load configuration file: version %d no longer supported" % file_config['version'])
- else:
- raise ConfigManagerDataReadError("Cannot load configuration file: version %d not yet supported" % file_config['version'])
- else:
- raise ConfigManagerDataReadError("No version information in configuration file " + config.db_filename)
+ config.data = ConfigManagerData.check_for_updates(file_config)
except IOError as ioe:
# if IOError is 'no such file or directory', then continue
# (raise empty), otherwise fail (raise error)
@@ -210,7 +252,7 @@ class ConfigManager:
else:
self.cc = isc.cc.Session()
self.cc.group_subscribe("ConfigManager")
- self.cc.group_subscribe("Boss", "ConfigManager")
+ self.cc.group_subscribe("Init", "ConfigManager")
self.running = False
# As a core module, CfgMgr is different than other modules,
# as it does not use a ModuleCCSession, and hence needs
@@ -232,10 +274,10 @@ class ConfigManager:
# handler, so make it use defaults (and flush any buffered logs)
ccsession.default_logconfig_handler({}, self.log_config_data)
- def notify_boss(self):
- """Notifies the Boss module that the Config Manager is running"""
+ def notify_b10_init(self):
+ """Notifies the Init module that the Config Manager is running"""
# TODO: Use a real, broadcast notification here.
- self.cc.group_sendmsg({"running": "ConfigManager"}, "Boss")
+ self.cc.group_sendmsg({"running": "ConfigManager"}, "Init")
def set_module_spec(self, spec):
"""Adds a ModuleSpec"""
@@ -551,7 +593,7 @@ class ConfigManager:
def run(self):
"""Runs the configuration manager."""
self.running = True
- while (self.running):
+ while self.running:
# we just wait eternally for any command here, so disable
# timeouts for this specific recv
self.cc.set_timeout(0)
@@ -566,3 +608,4 @@ class ConfigManager:
# Only respond if there actually is something to respond with
if answer is not None:
self.cc.group_reply(env, answer)
+ logger.info(CFGMGR_STOPPED_BY_COMMAND)
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
index 8701db3..73b6cef 100644
--- a/src/lib/python/isc/config/cfgmgr_messages.mes
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -41,6 +41,16 @@ system. The most likely cause is that msgq is not running.
The configuration manager is starting, reading and saving the configuration
settings to the shown file.
+% CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND Configuration found for both 'Boss' and 'Init', ignoring 'Boss'
+In the process of updating the configuration from version 2 to version 3,
+the configuration manager has found that there are existing configurations
+for both the old value 'Boss' and the new value 'Init'. This should in
+theory not happen, as in older versions 'Init' does not exist, and in newer
+versions 'Boss' does not exist. The configuration manager will continue
+with the update process, leaving the values for both as they are, so as not
+to overwrite any settings. However, the values for 'Boss' are ignored by
+BIND 10, and it is probably wise to check the configuration file manually.
+
% CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1
There was a problem reading the persistent configuration data as stored
on disk. The file may be corrupted, or it is of a version from where
@@ -61,6 +71,9 @@ error is given. The most likely cause is that the system does not have
write access to the configuration database file. The updated
configuration is not stored.
+% CFGMGR_STOPPED_BY_COMMAND received shutdown command, shutting down
+The configuration manager received a shutdown command, and is exiting.
+
% CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cfgmgr daemon. The
daemon will now shut down.
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index ae61e2a..495d20b 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -27,7 +27,7 @@ import copy
class ConfigDataError(Exception): pass
-BIND10_CONFIG_DATA_VERSION = 2
+BIND10_CONFIG_DATA_VERSION = 3
# Helper functions
def spec_part_is_list(spec_part):
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 5322901..d99fb86 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -49,6 +49,49 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(self.config_manager_data.db_filename,
self.writable_data_path + os.sep + "b10-config.db")
+ def test_check_for_updates_up_to_date(self):
+ # This should automatically give an up-to-date version
+ file_config = ConfigManagerData.read_from_file(
+ self.writable_data_path, "b10-config.db").data
+ updated_config = ConfigManagerData.check_for_updates(file_config)
+ self.assertEqual(file_config, updated_config)
+
+ def test_check_for_updates_from_1(self):
+ config = { "version": 1,
+ "foo": "bar",
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
+ def test_check_for_updates_from_2(self):
+ # No 'Boss' present, no change (except version)
+ config = { "version": 2,
+ "foo": "bar",
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
+ # With Boss, should be changed to 'Init'
+ config = { "version": 2,
+ "Boss": { "some config": 1 },
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config = { "version": config_data.BIND10_CONFIG_DATA_VERSION,
+ "Init": { "some config": 1 },
+ "something": [ 1, 2, 3 ] }
+ self.assertEqual(config, updated)
+
+ # With Boss AND Init, no change
+ config = { "version": 2,
+ "Boss": { "some config": 1 },
+ "Init": { "some other config": 1 },
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
def test_read_from_file(self):
ConfigManagerData.read_from_file(self.writable_data_path, "b10-config.db")
self.assertRaises(ConfigManagerDataEmpty,
@@ -174,12 +217,12 @@ class TestConfigManager(unittest.TestCase):
self.assertEqual(self.cm.data_path, self.writable_data_path)
self.assertIsNotNone(self.cm.config)
self.assertTrue(self.fake_session.has_subscription("ConfigManager"))
- self.assertTrue(self.fake_session.has_subscription("Boss", "ConfigManager"))
+ self.assertTrue(self.fake_session.has_subscription("Init", "ConfigManager"))
self.assertFalse(self.cm.running)
- def test_notify_boss(self):
- self.cm.notify_boss()
- msg = self.fake_session.get_message("Boss", None)
+ def test_notify_b10_init(self):
+ self.cm.notify_b10_init()
+ msg = self.fake_session.get_message("Init", None)
self.assertTrue(msg)
# this one is actually wrong, but 'current status quo'
self.assertEqual(msg, {"running": "ConfigManager"})
@@ -539,7 +582,8 @@ class TestConfigManager(unittest.TestCase):
def test_set_config_all(self):
my_ok_answer = { 'result': [ 0 ] }
- self.assertEqual({"version": 2}, self.cm.config.data)
+ self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION},
+ self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
self.cm.handle_msg(ccsession.create_command(
diff --git a/src/lib/python/isc/datasrc/tests/clientlist_test.py b/src/lib/python/isc/datasrc/tests/clientlist_test.py
index ea39d4e..bdac69c 100644
--- a/src/lib/python/isc/datasrc/tests/clientlist_test.py
+++ b/src/lib/python/isc/datasrc/tests/clientlist_test.py
@@ -43,8 +43,8 @@ class ClientListTest(unittest.TestCase):
Test the constructor. It should accept an RRClass. Check it
rejects invalid inputs.
"""
- isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
- isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH())
+ isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
+ isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH)
# Not enough arguments
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList)
# Bad types of arguments
@@ -52,7 +52,7 @@ class ClientListTest(unittest.TestCase):
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList, "IN")
# Too many arguments
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList,
- isc.dns.RRClass.IN(), isc.dns.RRClass.IN())
+ isc.dns.RRClass.IN, isc.dns.RRClass.IN)
def test_configure(self):
"""
@@ -60,7 +60,7 @@ class ClientListTest(unittest.TestCase):
ones are accepted and invalid rejected. We check the changes
have effect.
"""
- self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
+ self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
# This should be NOP now
self.clist.configure("[]", True)
# Check the zone is not there yet
@@ -102,7 +102,7 @@ class ClientListTest(unittest.TestCase):
Test the find accepts the right arguments, some of them can be omitted,
etc.
"""
- self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
+ self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
self.clist.configure('''[{
"type": "MasterFiles",
"params": {
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 36cf951..64f3e53 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -51,8 +51,8 @@ def check_for_rrset(expected_rrsets, rrset):
return False
def create_soa(serial):
- soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ soa = RRset(Name('example.org'), RRClass.IN, RRType.SOA, RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'ns1.example.org. admin.example.org. ' +
str(serial) + ' 3600 1800 2419200 7200'))
return soa
@@ -66,13 +66,13 @@ def test_findall_common(self, tested):
result, rrset, _ = tested.find_all(isc.dns.Name("www.sql1.example.com"),
ZoneFinder.FIND_DEFAULT)
self.assertEqual(ZoneFinder.DELEGATION, result)
- expected = RRset(Name('sql1.example.com.'), RRClass.IN(), RRType.NS(),
+ expected = RRset(Name('sql1.example.com.'), RRClass.IN, RRType.NS,
RRTTL(3600))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns01.example.com.'))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns02.example.com.'))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns03.example.com.'))
self.assertTrue(rrsets_equal(expected, rrset))
@@ -88,16 +88,16 @@ def test_findall_common(self, tested):
self.assertEqual(2, len(rrsets))
rrsets.sort(key=lambda rrset: rrset.get_type().to_text())
expected = [
- RRset(Name('mix.example.com.'), RRClass.IN(), RRType.A(),
+ RRset(Name('mix.example.com.'), RRClass.IN, RRType.A,
RRTTL(3600)),
- RRset(Name('mix.example.com.'), RRClass.IN(), RRType.AAAA(),
+ RRset(Name('mix.example.com.'), RRClass.IN, RRType.AAAA,
RRTTL(3600))
]
- expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
- expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.2"))
- expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ expected[0].add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
+ expected[0].add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.2"))
+ expected[1].add_rdata(Rdata(RRType.AAAA, RRClass.IN,
"2001:db8::1"))
- expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ expected[1].add_rdata(Rdata(RRType.AAAA, RRClass.IN,
"2001:db8::2"))
for (rrset, exp) in zip(rrsets, expected):
self.assertTrue(rrsets_equal(exp, rrset))
@@ -158,9 +158,9 @@ class DataSrcClient(unittest.TestCase):
expected_rrset_list = []
name = isc.dns.Name("sql1.example.com")
- rrclass = isc.dns.RRClass.IN()
+ rrclass = isc.dns.RRClass.IN
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.DNSKEY, isc.dns.RRTTL(3600),
[
"256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
"N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
@@ -168,7 +168,7 @@ class DataSrcClient(unittest.TestCase):
"5fs0dE/xLztL/CzZ"
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.DNSKEY, isc.dns.RRTTL(3600),
[
"257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
"KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
@@ -179,22 +179,22 @@ class DataSrcClient(unittest.TestCase):
"jRWAzGsxJiJyjd6w2k0="
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns01.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns02.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns03.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ isc.dns.RRType.NSEC, isc.dns.RRTTL(7200),
[
"www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
])
@@ -204,36 +204,36 @@ class DataSrcClient(unittest.TestCase):
# Since we passed separate_rrs = True to get_iterator, we get several
# sets of RRSIGs, one for each TTL
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(7200), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.SOA, isc.dns.RRTTL(3600),
[
"master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
])
name = isc.dns.Name("www.sql1.example.com.")
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.A, isc.dns.RRTTL(3600),
[
"192.0.2.100"
])
name = isc.dns.Name("www.sql1.example.com.")
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ isc.dns.RRType.NSEC, isc.dns.RRTTL(7200),
[
"sql1.example.com. A RRSIG NSEC"
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(7200), None)
# rrs is an iterator, but also has direct get_next_rrset(), use
# the latter one here
@@ -287,11 +287,11 @@ class DataSrcClient(unittest.TestCase):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.SOA(),
+ isc.dns.RRClass.IN,
+ isc.dns.RRType.SOA,
isc.dns.RRTTL(3600))
- expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
- isc.dns.RRClass.IN(),
+ expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA,
+ isc.dns.RRClass.IN,
"master.example.com. " +
"admin.example.com. 678 " +
"3600 1800 2419200 7200"))
@@ -337,7 +337,7 @@ class DataSrcClient(unittest.TestCase):
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
test_findall_common(self, finder)
@@ -347,11 +347,11 @@ class DataSrcClient(unittest.TestCase):
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -359,13 +359,13 @@ class DataSrcClient(unittest.TestCase):
# Check the optional parameters are optional
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.sql1.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.DELEGATION, result)
self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
@@ -374,7 +374,7 @@ class DataSrcClient(unittest.TestCase):
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("doesnotexist.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -382,16 +382,16 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(isc.datasrc.OutOfZone, finder.find,
isc.dns.Name("www.some.other.domain"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.TXT(),
+ isc.dns.RRType.TXT,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXRRSET, result)
self.assertEqual(None, rrset)
result, rrset, _ = finder.find(isc.dns.Name("cname-ext.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.CNAME, result)
self.assertEqual(
@@ -400,14 +400,14 @@ class DataSrcClient(unittest.TestCase):
result, rrset, flags = \
finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.A(), finder.FIND_DEFAULT)
+ isc.dns.RRType.A, finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(finder.RESULT_WILDCARD, flags)
self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.TXT(),
+ isc.dns.RRType.TXT,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXRRSET, result)
self.assertTrue(finder.RESULT_WILDCARD, flags)
@@ -415,7 +415,7 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(TypeError, finder.find,
"foo",
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
@@ -423,7 +423,7 @@ class DataSrcClient(unittest.TestCase):
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
"foo")
class DataSrcUpdater(unittest.TestCase):
@@ -451,7 +451,7 @@ class DataSrcUpdater(unittest.TestCase):
dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
updater = dsc.get_updater(isc.dns.Name("example.com"), False)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
ZoneFinder.FIND_DEFAULT)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -459,7 +459,7 @@ class DataSrcUpdater(unittest.TestCase):
# Omit optional parameters
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -471,11 +471,11 @@ class DataSrcUpdater(unittest.TestCase):
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -490,13 +490,13 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -508,7 +508,7 @@ class DataSrcUpdater(unittest.TestCase):
# the record should be gone now in the 'real' finder as well
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -522,7 +522,7 @@ class DataSrcUpdater(unittest.TestCase):
self.assertRaises(isc.datasrc.Error, updater.commit)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -537,26 +537,26 @@ class DataSrcUpdater(unittest.TestCase):
rrsets = updater.get_rrset_collection()
# From this point we cannot make further updates
- rrset = RRset(isc.dns.Name('www.example.com'), isc.dns.RRClass.IN(),
- isc.dns.RRType.AAAA(), isc.dns.RRTTL(10))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.AAAA(),
- isc.dns.RRClass.IN(), '2001:db8::1'))
+ rrset = RRset(isc.dns.Name('www.example.com'), isc.dns.RRClass.IN,
+ isc.dns.RRType.AAAA, isc.dns.RRTTL(10))
+ rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.AAAA,
+ isc.dns.RRClass.IN, '2001:db8::1'))
self.assertRaises(isc.datasrc.Error, updater.add_rrset, rrset)
# Checks basic API
found = rrsets.find(isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A())
+ isc.dns.RRClass.IN, isc.dns.RRType.A)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
found.to_text())
self.assertEqual(None, rrsets.find(isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.AAAA()))
+ isc.dns.RRClass.IN,
+ isc.dns.RRType.AAAA))
# Once committed collection cannot be used any more.
updater.commit()
self.assertRaises(isc.dns.RRsetCollectionError,
rrsets.find, isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A())
+ isc.dns.RRClass.IN, isc.dns.RRType.A)
# When we destroy the RRsetCollection it should release the refcount
# to the updater.
@@ -578,10 +578,10 @@ class DataSrcUpdater(unittest.TestCase):
# see if a lookup succeeds in sqlite3 ds
result, finder = dsc_sql.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -600,11 +600,11 @@ class DataSrcUpdater(unittest.TestCase):
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -619,7 +619,7 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -629,7 +629,7 @@ class DataSrcUpdater(unittest.TestCase):
# the record should still be available in the 'real' finder as well
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -755,9 +755,9 @@ class JournalWrite(unittest.TestCase):
conn.close()
def create_a(self, address):
- a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+ a_rr = RRset(Name('www.example.org'), RRClass.IN, RRType.A,
RRTTL(3600))
- a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ a_rr.add_rdata(Rdata(RRType.A, RRClass.IN, address))
return (a_rr)
def test_journal_write(self):
diff --git a/src/lib/python/isc/datasrc/tests/zone_loader_test.py b/src/lib/python/isc/datasrc/tests/zone_loader_test.py
index 62f67cd..4cd4879 100644
--- a/src/lib/python/isc/datasrc/tests/zone_loader_test.py
+++ b/src/lib/python/isc/datasrc/tests/zone_loader_test.py
@@ -13,6 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+import isc.log
import isc.datasrc
import isc.dns
@@ -96,7 +97,7 @@ class ZoneLoaderTests(unittest.TestCase):
"""
result, finder = self.client.find_zone(self.test_name)
self.assertEqual(self.client.SUCCESS, result)
- result, rrset, _ = finder.find(self.test_name, isc.dns.RRType.SOA())
+ result, rrset, _ = finder.find(self.test_name, isc.dns.RRType.SOA)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(soa_txt, rrset.to_text())
@@ -231,7 +232,7 @@ class ZoneLoaderTests(unittest.TestCase):
def test_wrong_class_from_client(self):
# For ds->ds loading, wrong class is detected upon construction
# Need a bit of the extended setup for CH source client
- clientlist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH())
+ clientlist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH)
clientlist.configure('[ { "type": "static", "params": "' +
STATIC_ZONE_FILE +'" } ]', False)
self.source_client, _, _ = clientlist.find(isc.dns.Name("bind."),
diff --git a/src/lib/python/isc/ddns/libddns_messages.mes b/src/lib/python/isc/ddns/libddns_messages.mes
index 406151c..abdd4e0 100644
--- a/src/lib/python/isc/ddns/libddns_messages.mes
+++ b/src/lib/python/isc/ddns/libddns_messages.mes
@@ -121,7 +121,7 @@ a bad class. The class of the update RRset must be either the same
as the class in the Zone Section, ANY, or NONE.
A FORMERR response is sent back to the client.
-% LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+% LIBDDNS_UPDATE_DATASRC_COMMIT_FAILED error in datasource during DDNS update: %1
An error occurred while committing the DDNS update changes to the
datasource. The specific error is printed. A SERVFAIL response is sent
back to the client.
@@ -167,7 +167,7 @@ rejected by the zone's update ACL. When this library is used by
b10-ddns, the server will then completely ignore the request; no
response will be sent.
-% LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3
+% LIBDDNS_UPDATE_PROCESSING_FAILED update client %1 for zone %2: %3
Debug message. An error is found in processing a dynamic update
request. This log message is used for general errors that are not
normally expected to happen. So, in general, it would mean some
diff --git a/src/lib/python/isc/ddns/session.py b/src/lib/python/isc/ddns/session.py
index 60834fb..3368523 100644
--- a/src/lib/python/isc/ddns/session.py
+++ b/src/lib/python/isc/ddns/session.py
@@ -135,7 +135,7 @@ class DDNS_SOA:
def __write_soa_internal(self, origin_soa, soa_num):
'''Write back serial number to soa'''
new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(),
- RRType.SOA(), origin_soa.get_ttl())
+ RRType.SOA, origin_soa.get_ttl())
soa_rdata_parts = origin_soa.get_rdata()[0].to_text().split()
soa_rdata_parts[2] = str(soa_num.get_value())
new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(),
@@ -248,18 +248,18 @@ class UpdateSession:
self.__check_update_acl(self.__zname, self.__zclass)
self._create_diff()
prereq_result = self.__check_prerequisites()
- if prereq_result != Rcode.NOERROR():
+ if prereq_result != Rcode.NOERROR:
self.__make_response(prereq_result)
return UPDATE_ERROR, self.__zname, self.__zclass
update_result = self.__do_update()
- if update_result != Rcode.NOERROR():
+ if update_result != Rcode.NOERROR:
self.__make_response(update_result)
return UPDATE_ERROR, self.__zname, self.__zclass
- self.__make_response(Rcode.NOERROR())
+ self.__make_response(Rcode.NOERROR)
return UPDATE_SUCCESS, self.__zname, self.__zclass
except UpdateError as e:
if not e.nolog:
- logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_ERROR,
+ logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_PROCESSING_FAILED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(e.zname, e.zclass), e)
# If RCODE is specified, create a corresponding response and return
@@ -272,7 +272,7 @@ class UpdateSession:
except isc.datasrc.Error as e:
logger.error(LIBDDNS_DATASRC_ERROR,
ClientFormatter(self.__client_addr, self.__tsig), e)
- self.__make_response(Rcode.SERVFAIL())
+ self.__make_response(Rcode.SERVFAIL)
return UPDATE_ERROR, None, None
def _get_update_zone(self):
@@ -295,11 +295,11 @@ class UpdateSession:
n_zones = self.__message.get_rr_count(SECTION_ZONE)
if n_zones != 1:
raise UpdateError('Invalid number of records in zone section: ' +
- str(n_zones), None, None, Rcode.FORMERR())
+ str(n_zones), None, None, Rcode.FORMERR)
zrecord = self.__message.get_question()[0]
- if zrecord.get_type() != RRType.SOA():
+ if zrecord.get_type() != RRType.SOA:
raise UpdateError('update zone section contains non-SOA',
- None, None, Rcode.FORMERR())
+ None, None, Rcode.FORMERR)
# See if we're serving a primary zone specified in the zone section.
zname = zrecord.get_name()
@@ -316,12 +316,12 @@ class UpdateSession:
logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_FORWARD_FAIL,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('forward', zname, zclass, Rcode.NOTIMP(), True)
+ raise UpdateError('forward', zname, zclass, Rcode.NOTIMP, True)
# zone wasn't found
logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_NOTAUTH,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH(), True)
+ raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH, True)
def _create_diff(self):
'''
@@ -352,7 +352,7 @@ class UpdateSession:
logger.info(LIBDDNS_UPDATE_DENIED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('rejected', zname, zclass, Rcode.REFUSED(), True)
+ raise UpdateError('rejected', zname, zclass, Rcode.REFUSED, True)
if action == DROP:
logger.info(LIBDDNS_UPDATE_DROPPED,
ClientFormatter(self.__client_addr, self.__tsig),
@@ -459,7 +459,7 @@ class UpdateSession:
def __check_prerequisites(self):
'''Check the prerequisites section of the UPDATE Message.
RFC2136 Section 2.4.
- Returns a dns Rcode signaling either no error (Rcode.NOERROR())
+ Returns a dns Rcode signaling either no error (Rcode.NOERROR)
or that one of the prerequisites failed (any other Rcode).
'''
@@ -473,20 +473,20 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.NOTZONE()
+ return Rcode.NOTZONE
# Algorithm taken from RFC2136 Section 3.2
- if rrset.get_class() == RRClass.ANY():
+ if rrset.get_class() == RRClass.ANY:
if rrset.get_ttl().get_value() != 0 or\
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_ANY,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_type() == RRType.ANY():
+ return Rcode.FORMERR
+ elif rrset.get_type() == RRType.ANY:
if not self.__prereq_name_in_use(rrset):
- rcode = Rcode.NXDOMAIN()
+ rcode = Rcode.NXDOMAIN
logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -494,23 +494,23 @@ class UpdateSession:
return rcode
else:
if not self.__prereq_rrset_exists(rrset):
- rcode = Rcode.NXRRSET()
+ rcode = Rcode.NXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
- elif rrset.get_class() == RRClass.NONE():
+ elif rrset.get_class() == RRClass.NONE:
if rrset.get_ttl().get_value() != 0 or\
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_NONE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_type() == RRType.ANY():
+ return Rcode.FORMERR
+ elif rrset.get_type() == RRType.ANY:
if not self.__prereq_name_not_in_use(rrset):
- rcode = Rcode.YXDOMAIN()
+ rcode = Rcode.YXDOMAIN
logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -518,7 +518,7 @@ class UpdateSession:
return rcode
else:
if not self.__prereq_rrset_does_not_exist(rrset):
- rcode = Rcode.YXRRSET()
+ rcode = Rcode.YXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -530,7 +530,7 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
else:
collect_rrsets(exact_match_rrsets, rrset)
else:
@@ -538,11 +538,11 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
for collected_rrset in exact_match_rrsets:
if not self.__prereq_rrset_exists_value(collected_rrset):
- rcode = Rcode.NXRRSET()
+ rcode = Rcode.NXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -550,7 +550,7 @@ class UpdateSession:
return rcode
# All prerequisites are satisfied
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def __set_soa_rrset(self, rrset):
'''Sets the given rrset to the member __added_soa (which
@@ -570,7 +570,7 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.NOTZONE()
+ return Rcode.NOTZONE
if rrset.get_class() == self.__zclass:
# In fact, all metatypes are in a specific range,
# so one check can test TKEY to ANY
@@ -581,52 +581,52 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- if rrset.get_type() == RRType.SOA():
+ return Rcode.FORMERR
+ if rrset.get_type() == RRType.SOA:
# In case there's multiple soa records in the update
# somehow, just take the last
for rr in foreach_rr(rrset):
self.__set_soa_rrset(rr)
- elif rrset.get_class() == RRClass.ANY():
+ elif rrset.get_class() == RRClass.ANY:
if rrset.get_ttl().get_value() != 0:
logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_rdata_count() > 0:
logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_type().get_code() >= 249 and\
rrset.get_type().get_code() <= 254:
logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_class() == RRClass.NONE():
+ return Rcode.FORMERR
+ elif rrset.get_class() == RRClass.NONE:
if rrset.get_ttl().get_value() != 0:
logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_type().get_code() >= 249:
logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
else:
logger.info(LIBDDNS_UPDATE_BAD_CLASS,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- return Rcode.NOERROR()
+ return Rcode.FORMERR
+ return Rcode.NOERROR
def __do_update_add_single_rr(self, rr, existing_rrset):
'''Helper for __do_update_add_rrs_to_rrset: only add the
@@ -657,7 +657,7 @@ class UpdateSession:
# For a number of cases, we may need to remove data in the zone
# (note; SOA is handled separately by __do_update, so that one
# is explicitely ignored here)
- if rrset.get_type() == RRType.SOA():
+ if rrset.get_type() == RRType.SOA:
return
result, orig_rrset, _ = self.__diff.find(rrset.get_name(),
rrset.get_type())
@@ -668,7 +668,7 @@ class UpdateSession:
return
elif result == ZoneFinder.SUCCESS:
# if update is cname, and zone rr is not, ignore
- if rrset.get_type() == RRType.CNAME():
+ if rrset.get_type() == RRType.CNAME:
# Remove original CNAME record (the new one
# is added below)
self.__diff.delete_data(orig_rrset)
@@ -679,7 +679,7 @@ class UpdateSession:
elif result == ZoneFinder.NXRRSET:
# There is data present, but not for this type.
# If this type is CNAME, ignore the update
- if rrset.get_type() == RRType.CNAME():
+ if rrset.get_type() == RRType.CNAME:
return
for rr in foreach_rr(rrset):
self.__do_update_add_single_rr(rr, orig_rrset)
@@ -696,8 +696,8 @@ class UpdateSession:
rrset.get_type())
if result == ZoneFinder.SUCCESS:
if to_delete.get_name() == self.__zname and\
- (to_delete.get_type() == RRType.SOA() or\
- to_delete.get_type() == RRType.NS()):
+ (to_delete.get_type() == RRType.SOA or\
+ to_delete.get_type() == RRType.NS):
# ignore
return
for rr in foreach_rr(to_delete):
@@ -749,8 +749,8 @@ class UpdateSession:
for to_delete in rrsets:
# if name == self.__zname and type is soa or ns, don't delete!
if to_delete.get_name() == self.__zname and\
- (to_delete.get_type() == RRType.SOA() or
- to_delete.get_type() == RRType.NS()):
+ (to_delete.get_type() == RRType.SOA or
+ to_delete.get_type() == RRType.NS):
continue
else:
for rr in foreach_rr(to_delete):
@@ -771,10 +771,10 @@ class UpdateSession:
to_delete = convert_rrset_class(rrset, self.__zclass)
if rrset.get_name() == self.__zname:
- if rrset.get_type() == RRType.SOA():
+ if rrset.get_type() == RRType.SOA:
# ignore
return
- elif rrset.get_type() == RRType.NS():
+ elif rrset.get_type() == RRType.NS:
# hmm. okay. annoying. There must be at least one left,
# delegate to helper method
self.__ns_deleter_helper(to_delete)
@@ -793,14 +793,14 @@ class UpdateSession:
# serial magic and add the newly created one
# get it from DS and to increment and stuff
- result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA(),
+ result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA,
ZoneFinder.NO_WILDCARD |
ZoneFinder.FIND_GLUE_OK)
# We may implement recovering from missing SOA data at some point, but
# for now servfail on such a broken state
if result != ZoneFinder.SUCCESS:
raise UpdateError("Error finding SOA record in datasource.",
- self.__zname, self.__zclass, Rcode.SERVFAIL())
+ self.__zname, self.__zclass, Rcode.SERVFAIL)
serial_operation = DDNS_SOA()
if self.__added_soa is not None and\
serial_operation.soa_update_check(old_soa, self.__added_soa):
@@ -820,7 +820,7 @@ class UpdateSession:
'''
# prescan
prescan_result = self.__do_prescan()
- if prescan_result != Rcode.NOERROR():
+ if prescan_result != Rcode.NOERROR:
return prescan_result
# update
@@ -841,22 +841,22 @@ class UpdateSession:
for rrset in self.__message.get_section(SECTION_UPDATE):
if rrset.get_class() == self.__zclass:
self.__do_update_add_rrs_to_rrset(rrset)
- elif rrset.get_class() == RRClass.ANY():
- if rrset.get_type() == RRType.ANY():
+ elif rrset.get_class() == RRClass.ANY:
+ if rrset.get_type() == RRType.ANY:
self.__do_update_delete_name(rrset)
else:
self.__do_update_delete_rrset(rrset)
- elif rrset.get_class() == RRClass.NONE():
+ elif rrset.get_class() == RRClass.NONE:
self.__do_update_delete_rrs_from_rrset(rrset)
self.__diff.commit()
- return Rcode.NOERROR()
+ return Rcode.NOERROR
except isc.datasrc.Error as dse:
- logger.info(LIBDDNS_UPDATE_DATASRC_ERROR, dse)
- return Rcode.SERVFAIL()
+ logger.info(LIBDDNS_UPDATE_DATASRC_COMMIT_FAILED, dse)
+ return Rcode.SERVFAIL
except Exception as uce:
logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
uce)
- return Rcode.SERVFAIL()
+ return Rcode.SERVFAIL
diff --git a/src/lib/python/isc/ddns/tests/session_tests.py b/src/lib/python/isc/ddns/tests/session_tests.py
index f7c2d3c..bc25310 100644
--- a/src/lib/python/isc/ddns/tests/session_tests.py
+++ b/src/lib/python/isc/ddns/tests/session_tests.py
@@ -30,8 +30,8 @@ WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
TEST_ZONE_NAME = Name('example.org')
-UPDATE_RRTYPE = RRType.SOA()
-TEST_RRCLASS = RRClass.IN()
+UPDATE_RRTYPE = RRType.SOA
+TEST_RRCLASS = RRClass.IN
TEST_ZONE_RECORD = Question(TEST_ZONE_NAME, TEST_RRCLASS, UPDATE_RRTYPE)
TEST_CLIENT6 = ('2001:db8::1', 53, 0, 0)
TEST_CLIENT4 = ('192.0.2.1', 53)
@@ -42,8 +42,8 @@ def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
updates=[], tsig_key=None):
msg = Message(Message.RENDER)
msg.set_qid(5353) # arbitrary chosen
- msg.set_opcode(Opcode.UPDATE())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.UPDATE)
+ msg.set_rcode(Rcode.NOERROR)
for z in zones:
msg.add_question(z)
for p in prerequisites:
@@ -99,7 +99,7 @@ class SessionModuleTests(unittest.TestCase):
def test_foreach_rr_in_rrset(self):
rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
l = []
for rr in foreach_rr(rrset):
@@ -121,17 +121,17 @@ class SessionModuleTests(unittest.TestCase):
def test_convert_rrset_class(self):
# Converting an RRSET to a different class should work
# if the rdata types can be converted
- rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ rrset = create_rrset("www.example.org", RRClass.NONE, RRType.A,
3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
- rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ rrset2 = convert_rrset_class(rrset, RRClass.IN)
self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
"www.example.org. 3600 IN A 192.0.2.2\n",
str(rrset2))
- rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
- self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
- "c0000201\nwww.example.org. 3600 CLASS254 " +
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE)
+ self.assertEqual("www.example.org. 3600 NONE A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 NONE " +
"A \\# 4 c0000202\n",
str(rrset3))
@@ -140,10 +140,10 @@ class SessionModuleTests(unittest.TestCase):
# there was a ticket about making a better hierarchy for
# dns/parsing related exceptions)
self.assertRaises(InvalidRdataLength, convert_rrset_class,
- rrset, RRClass.CH())
+ rrset, RRClass.CH)
add_rdata(rrset, b'\xc0\x00')
self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
- rrset, RRClass.IN())
+ rrset, RRClass.IN)
def test_collect_rrsets(self):
'''
@@ -152,25 +152,25 @@ class SessionModuleTests(unittest.TestCase):
'''
collected = []
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ]))
# Same name and class, different type
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "two" ]))
# Same class and type as an existing one, different name
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.3" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.3" ]))
# Same name and type as an existing one, different class
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.4" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "two" ]))
strings = [ rrset.to_text() for rrset in collected ]
# note + vs , in this list
@@ -216,7 +216,7 @@ class SessionTestBase(unittest.TestCase):
'''Perform common checks on update resposne message.'''
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_QR))
# note: we convert opcode to text it'd be more helpful on failure.
- self.assertEqual(Opcode.UPDATE().to_text(), msg.get_opcode().to_text())
+ self.assertEqual(Opcode.UPDATE.to_text(), msg.get_opcode().to_text())
self.assertEqual(expected_rcode.to_text(), msg.get_rcode().to_text())
# All sections should be cleared
self.assertEqual(0, msg.get_rr_count(SECTION_ZONE))
@@ -230,22 +230,22 @@ class TestDDNSSOA(unittest.TestCase):
'''unittest for update_soa function'''
soa_update = DDNS_SOA()
soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"])
expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. "
+ RRType.SOA, 3600, ["ns1.example.org. "
+ "admin.example.org. " +
"1234 3600 1800 2419200 7200"])
self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
expected_soa_rr.get_rdata()[0].to_text())
max_serial = 2 ** 32 - 1
soa_rdata = "%d %s"%(max_serial,"3600 1800 2419200 7200")
- soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), 3600,
+ soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA, 3600,
["ns1.example.org. " + "admin.example.org. " +
soa_rdata])
expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. "
+ RRType.SOA, 3600, ["ns1.example.org. "
+ "admin.example.org. " +
"1 3600 1800 2419200 7200"])
self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
@@ -253,11 +253,11 @@ class TestDDNSSOA(unittest.TestCase):
def test_soa_update_check(self):
'''unittest for soa_update_check function'''
- small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"])
- large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200"])
@@ -269,11 +269,11 @@ class TestDDNSSOA(unittest.TestCase):
small_soa_rr))
small_serial = 1235 + 2 ** 31
soa_rdata = "%d %s"%(small_serial,"3600 1800 2419200 7200")
- small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
soa_rdata])
- large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200"])
@@ -305,41 +305,41 @@ class SessionTest(SessionTestBase):
self.assertEqual(UPDATE_ERROR, result)
self.assertEqual(None, zname)
self.assertEqual(None, zclass)
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
# Zone section contains multiple records
msg = create_update_msg(zones=[TEST_ZONE_RECORD, TEST_ZONE_RECORD])
session = UpdateSession(msg, TEST_CLIENT4, None)
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
# Zone section's type is not SOA
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.A())])
+ RRType.A)])
session = UpdateSession(msg, TEST_CLIENT4, None)
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
def test_update_secondary(self):
# specified zone is configured as a secondary. Since this
# implementation doesn't support update forwarding, the result
# should be NOTIMP.
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())])
+ RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.NOTIMP())
+ self.check_response(session.get_message(), Rcode.NOTIMP)
def check_notauth(self, zname, zclass=TEST_RRCLASS):
'''Common test sequence for the 'notauth' test'''
- msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA())])
+ msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.NOTAUTH())
+ self.check_response(session.get_message(), Rcode.NOTAUTH)
def test_update_notauth(self):
'''Update attempt for non authoritative zones'''
@@ -349,7 +349,7 @@ class SessionTest(SessionTestBase):
# (match must be exact)
self.check_notauth(Name('sub.example.org'))
# zone class doesn't match
- self.check_notauth(Name('example.org'), RRClass.CH())
+ self.check_notauth(Name('example.org'), RRClass.CH)
def test_update_datasrc_error(self):
# if the data source client raises an exception, it should result in
@@ -358,17 +358,17 @@ class SessionTest(SessionTestBase):
def find_zone(self, name):
raise isc.datasrc.Error('faked exception')
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())])
+ RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS,
BadDataSourceClient()))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.SERVFAIL())
+ self.check_response(session.get_message(), Rcode.SERVFAIL)
def test_foreach_rr_in_rrset(self):
rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
l = []
for rr in foreach_rr(rrset):
@@ -390,17 +390,17 @@ class SessionTest(SessionTestBase):
def test_convert_rrset_class(self):
# Converting an RRSET to a different class should work
# if the rdata types can be converted
- rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ rrset = create_rrset("www.example.org", RRClass.NONE, RRType.A,
3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
- rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ rrset2 = convert_rrset_class(rrset, RRClass.IN)
self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
"www.example.org. 3600 IN A 192.0.2.2\n",
str(rrset2))
- rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
- self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
- "c0000201\nwww.example.org. 3600 CLASS254 " +
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE)
+ self.assertEqual("www.example.org. 3600 NONE A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 NONE " +
"A \\# 4 c0000202\n",
str(rrset3))
@@ -409,10 +409,10 @@ class SessionTest(SessionTestBase):
# there was a ticket about making a better hierarchy for
# dns/parsing related exceptions)
self.assertRaises(InvalidRdataLength, convert_rrset_class,
- rrset, RRClass.CH())
+ rrset, RRClass.CH)
add_rdata(rrset, b'\xc0\x00')
self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
- rrset, RRClass.IN())
+ rrset, RRClass.IN)
def test_collect_rrsets(self):
'''
@@ -421,25 +421,25 @@ class SessionTest(SessionTestBase):
'''
collected = []
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ]))
# Same name and class, different type
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "two" ]))
# Same class and type as an existing one, different name
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.3" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.3" ]))
# Same name and type as an existing one, different class
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.4" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "two" ]))
strings = [ rrset.to_text() for rrset in collected ]
# note + vs , in this list
@@ -469,64 +469,64 @@ class SessionTest(SessionTestBase):
'''
# Basic existence checks
# www.example.org should have an A, but not an MX
- rrset = create_rrset("www.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("www.example.org", rrclass, RRType.MX(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.MX, 0)
self.__prereq_helper(method, not expected, rrset)
# example.org should have an MX, but not an A
- rrset = create_rrset("example.org", rrclass, RRType.MX(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.MX, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# Also check the case where the name does not even exist
- rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# Wildcard expansion should not be applied, but literal matches
# should work
- rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
# Likewise, CNAME directly should match, but what it points to should
# not
- rrset = create_rrset("cname.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("cname.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME(), 0)
+ rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME, 0)
self.__prereq_helper(method, expected, rrset)
# And also make sure a delegation (itself) is not treated as existing
# data
- rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# But the delegation data itself should match
- rrset = create_rrset("sub.example.org", rrclass, RRType.NS(), 0)
+ rrset = create_rrset("sub.example.org", rrclass, RRType.NS, 0)
self.__prereq_helper(method, expected, rrset)
# As should glue
- rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_exists(self):
method = self._session._UpdateSession__prereq_rrset_exists
self.__check_prerequisite_exists_combined(method,
- RRClass.ANY(),
+ RRClass.ANY,
True)
def test_check_prerequisite_does_not_exist(self):
method = self._session._UpdateSession__prereq_rrset_does_not_exist
self.__check_prerequisite_exists_combined(method,
- RRClass.NONE(),
+ RRClass.NONE,
False)
def test_check_prerequisite_exists_value(self):
method = self._session._UpdateSession__prereq_rrset_exists_value
- rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 0)
+ rrset = create_rrset("www.example.org", RRClass.IN, RRType.A, 0)
# empty one should not match
self.__prereq_helper(method, False, rrset)
@@ -539,11 +539,11 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# Also test one with more than one RR
- rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ rrset = create_rrset("example.org", RRClass.IN, RRType.NS, 0)
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns1.example.org.")
self.__prereq_helper(method, False, rrset)
- add_rdata(rrset, "ns2.example.org")
+ add_rdata(rrset, "ns2.example.org.")
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, True, rrset)
@@ -551,7 +551,7 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# Repeat that, but try a different order of Rdata addition
- rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ rrset = create_rrset("example.org", RRClass.IN, RRType.NS, 0)
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, False, rrset)
@@ -563,8 +563,8 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# and test one where the name does not even exist
- rrset = create_rrset("doesnotexist.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ])
+ rrset = create_rrset("doesnotexist.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ])
self.__prereq_helper(method, False, rrset)
def __check_prerequisite_name_in_use_combined(self, method, rrclass,
@@ -573,42 +573,42 @@ class SessionTest(SessionTestBase):
in behaviour) methods __prereq_name_in_use and
__prereq_name_not_in_use
'''
- rrset = create_rrset("example.org", rrclass, RRType.ANY(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("www.example.org", rrclass, RRType.ANY(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
rrset = create_rrset("doesnotexist.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("belowdelegation.sub.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("foo.wildcard.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
# empty nonterminal should not match
rrset = create_rrset("nonterminal.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("empty.nonterminal.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_name_in_use(self):
method = self._session._UpdateSession__prereq_name_in_use
self.__check_prerequisite_name_in_use_combined(method,
- RRClass.ANY(),
+ RRClass.ANY,
True)
def test_check_prerequisite_name_not_in_use(self):
method = self._session._UpdateSession__prereq_name_not_in_use
self.__check_prerequisite_name_in_use_combined(method,
- RRClass.NONE(),
+ RRClass.NONE,
False)
def check_prerequisite_result(self, expected, prerequisites):
@@ -632,7 +632,7 @@ class SessionTest(SessionTestBase):
self.assertEqual(expected.to_text(),
session._UpdateSession__message.get_rcode().to_text())
# And that the result looks right
- if expected == Rcode.NOERROR():
+ if expected == Rcode.NOERROR:
self.assertEqual(UPDATE_SUCCESS, result)
else:
self.assertEqual(UPDATE_ERROR, result)
@@ -672,7 +672,7 @@ class SessionTest(SessionTestBase):
self.assertEqual(expected.to_text(),
session._UpdateSession__message.get_rcode().to_text())
# And that the result looks right
- if expected == Rcode.NOERROR():
+ if expected == Rcode.NOERROR:
self.assertEqual(UPDATE_SUCCESS, result)
else:
self.assertEqual(UPDATE_ERROR, result)
@@ -685,78 +685,75 @@ class SessionTest(SessionTestBase):
# in the specific prerequisite type tests)
# Let's first define a number of prereq's that should succeed
- rrset_exists_yes = create_rrset("example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_exists_yes = create_rrset("example.org", RRClass.ANY,
+ RRType.SOA, 0)
- rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ])
+ rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ])
rrset_does_not_exist_yes = create_rrset("foo.example.org",
- RRClass.NONE(), RRType.SOA(),
+ RRClass.NONE, RRType.SOA,
0)
- name_in_use_yes = create_rrset("www.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_yes = create_rrset("www.example.org", RRClass.ANY,
+ RRType.ANY, 0)
- name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE(),
- RRType.ANY(), 0)
+ name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE,
+ RRType.ANY, 0)
- rrset_exists_value_1 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns1.example.org" ])
- rrset_exists_value_2 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns2.example.org" ])
- rrset_exists_value_3 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns3.example.org" ])
+ rrset_exists_value_1 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns1.example.org."])
+ rrset_exists_value_2 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns2.example.org."])
+ rrset_exists_value_3 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns3.example.org."])
# and a number that should not
- rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.SOA, 0)
- rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ])
+ rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ])
- rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE(),
- RRType.SOA(), 0)
+ rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE,
+ RRType.SOA, 0)
- name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.ANY, 0)
- name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE(),
- RRType.ANY(), 0)
+ name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE,
+ RRType.ANY, 0)
# check 'no' result codes
- self.check_prerequisite_result(Rcode.NXRRSET(),
+ self.check_prerequisite_result(Rcode.NXRRSET,
[ rrset_exists_no ])
- self.check_prerequisite_result(Rcode.NXRRSET(),
+ self.check_prerequisite_result(Rcode.NXRRSET,
[ rrset_exists_value_no ])
- self.check_prerequisite_result(Rcode.YXRRSET(),
+ self.check_prerequisite_result(Rcode.YXRRSET,
[ rrset_does_not_exist_no ])
- self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ self.check_prerequisite_result(Rcode.NXDOMAIN,
[ name_in_use_no ])
- self.check_prerequisite_result(Rcode.YXDOMAIN(),
+ self.check_prerequisite_result(Rcode.YXDOMAIN,
[ name_not_in_use_no ])
# the 'yes' codes should result in ok
# individually
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_yes ] )
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_does_not_exist_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ name_in_use_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ name_not_in_use_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_1,
rrset_exists_value_2,
rrset_exists_value_3])
# and together
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_yes,
rrset_exists_value_yes,
rrset_does_not_exist_yes,
@@ -768,7 +765,7 @@ class SessionTest(SessionTestBase):
# try out a permutation, note that one rrset is split up,
# and the order of the RRs should not matter
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_3,
rrset_exists_yes,
rrset_exists_value_2,
@@ -777,7 +774,7 @@ class SessionTest(SessionTestBase):
# Should fail on the first error, even if most of the
# prerequisites are ok
- self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ self.check_prerequisite_result(Rcode.NXDOMAIN,
[ rrset_exists_value_3,
rrset_exists_yes,
rrset_exists_value_2,
@@ -786,39 +783,39 @@ class SessionTest(SessionTestBase):
rrset_exists_value_1])
def test_prerequisite_notzone(self):
- rrset = create_rrset("some.other.zone.", RRClass.ANY(), RRType.SOA(), 0)
- self.check_prerequisite_result(Rcode.NOTZONE(), [ rrset ])
+ rrset = create_rrset("some.other.zone.", RRClass.ANY, RRType.SOA, 0)
+ self.check_prerequisite_result(Rcode.NOTZONE, [ rrset ])
def test_prerequisites_formerr(self):
# test for form errors in the prerequisite section
# Class ANY, non-zero TTL
- rrset = create_rrset("example.org", RRClass.ANY(), RRType.SOA(), 1)
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset("example.org", RRClass.ANY, RRType.SOA, 1)
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class ANY, but with rdata
- rrset = create_rrset("example.org", RRClass.ANY(), RRType.A(), 0,
+ rrset = create_rrset("example.org", RRClass.ANY, RRType.A, 0,
[ b'\x00\x00\x00\x00' ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class NONE, non-zero TTL
- rrset = create_rrset("example.org", RRClass.NONE(), RRType.SOA(), 1)
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset("example.org", RRClass.NONE, RRType.SOA, 1)
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class NONE, but with rdata
- rrset = create_rrset("example.org", RRClass.NONE(), RRType.A(), 0,
+ rrset = create_rrset("example.org", RRClass.NONE, RRType.A, 0,
[ b'\x00\x00\x00\x00' ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Matching class and type, but non-zero TTL
- rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 1,
+ rrset = create_rrset("www.example.org", RRClass.IN, RRType.A, 1,
[ "192.0.2.1" ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Completely different class
- rrset = create_rrset("example.org", RRClass.CH(), RRType.TXT(), 0,
+ rrset = create_rrset("example.org", RRClass.CH, RRType.TXT, 0,
[ "foo" ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
def __prereq_helper(self, method, expected, rrset):
'''Calls the given method with self._datasrc_client
@@ -830,84 +827,84 @@ class SessionTest(SessionTestBase):
'''Prepare a number of RRsets to be used in several update tests
The rrsets are stored in self'''
orig_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
self.orig_a_rrset = orig_a_rrset
rrset_update_a = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.2", "192.0.2.3" ])
self.rrset_update_a = rrset_update_a
rrset_update_soa = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200" ])
self.rrset_update_soa = rrset_update_soa
- rrset_update_soa_del = create_rrset("example.org", RRClass.NONE(),
- RRType.SOA(), 0,
+ rrset_update_soa_del = create_rrset("example.org", RRClass.NONE,
+ RRType.SOA, 0,
[ "ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200" ])
self.rrset_update_soa_del = rrset_update_soa_del
rrset_update_soa2 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"4000 3600 1800 2419200 7200" ])
self.rrset_update_soa2 = rrset_update_soa2
- rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY,
+ RRType.ANY, 0)
self.rrset_update_del_name = rrset_update_del_name
- rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.ANY, 0)
self.rrset_update_del_name_apex = rrset_update_del_name_apex
- rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY(),
- RRType.A(), 0)
+ rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY,
+ RRType.A, 0)
self.rrset_update_del_rrset = rrset_update_del_rrset
- rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.MX(), 0)
+ rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.MX, 0)
self.rrset_update_del_mx_apex = rrset_update_del_mx_apex
- rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.SOA, 0)
self.rrset_update_del_soa_apex = rrset_update_del_soa_apex
- rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.NS(), 0)
+ rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.NS, 0)
self.rrset_update_del_ns_apex = rrset_update_del_ns_apex
rrset_update_del_rrset_part = create_rrset("www.example.org",
- RRClass.NONE(), RRType.A(),
+ RRClass.NONE, RRType.A,
0,
[ b'\xc0\x00\x02\x02',
b'\xc0\x00\x02\x03' ])
self.rrset_update_del_rrset_part = rrset_update_del_rrset_part
- rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE(),
- RRType.NS(), 0,
+ rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE,
+ RRType.NS, 0,
[ b'\x03ns1\x07example\x03org\x00',
b'\x03ns2\x07example\x03org\x00',
b'\x03ns3\x07example\x03org\x00' ])
self.rrset_update_del_rrset_ns = rrset_update_del_rrset_ns
- rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE(),
- RRType.MX(), 0,
+ rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE,
+ RRType.MX, 0,
[ b'\x00\x0a\x04mail\x07example\x03org\x00' ])
self.rrset_update_del_rrset_mx = rrset_update_del_rrset_mx
def test_acl_before_prereq(self):
- name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.ANY, 0)
# Test a prerequisite that would fail
- self.check_full_handle_result(Rcode.NXDOMAIN(), [], [ name_in_use_no ])
+ self.check_full_handle_result(Rcode.NXDOMAIN, [], [ name_in_use_no ])
# Change ACL so that it would be denied
self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
@@ -915,7 +912,7 @@ class SessionTest(SessionTestBase):
# The prerequisite should now not be reached; it should fail on the
# ACL
- self.check_full_handle_result(Rcode.REFUSED(), [], [ name_in_use_no ])
+ self.check_full_handle_result(Rcode.REFUSED, [], [ name_in_use_no ])
def test_prescan(self):
'''Test whether the prescan succeeds on data that is ok, and whether
@@ -923,29 +920,29 @@ class SessionTest(SessionTestBase):
# prepare a set of correct update statements
self.__initialize_update_rrsets()
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_a ])
# check if soa is noticed
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa ],
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_soa ],
self.rrset_update_soa)
# Other types of succesful prechecks
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa2 ],
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_soa2 ],
self.rrset_update_soa2)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_name_apex ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_mx_apex ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
# and check a few permutations of the above
# all of them (with one of the soas)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[
self.rrset_update_a,
self.rrset_update_soa,
@@ -960,16 +957,16 @@ class SessionTest(SessionTestBase):
# Two soas. Should we reject or simply use the last?
# (RFC is not really explicit on this, but between the lines I read
# use the last)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_soa,
self.rrset_update_soa2 ],
self.rrset_update_soa2)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_soa2,
self.rrset_update_soa ],
self.rrset_update_soa)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[
self.rrset_update_del_mx_apex,
self.rrset_update_del_name,
@@ -984,36 +981,36 @@ class SessionTest(SessionTestBase):
def test_prescan_failures(self):
'''Test whether prescan fails on bad data'''
# out of zone data
- rrset = create_rrset("different.zone", RRClass.ANY(), RRType.TXT(), 0)
- self.check_prescan_result(Rcode.NOTZONE(), [ rrset ])
+ rrset = create_rrset("different.zone", RRClass.ANY, RRType.TXT, 0)
+ self.check_prescan_result(Rcode.NOTZONE, [ rrset ])
# forbidden type, zone class
- rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero TTL, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 1)
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.TXT, 1)
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero Rdata, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.TXT, 0,
[ "foo" ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# forbidden type, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.AXFR(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.AXFR, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero TTL, class NONE
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.TXT(), 1)
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE, RRType.TXT, 1)
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# forbidden type, class NONE
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.AXFR(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE, RRType.AXFR, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
def __check_inzone_data(self, expected_result, name, rrtype,
expected_rrset = None):
@@ -1054,7 +1051,7 @@ class SessionTest(SessionTestBase):
# during this test, we will extend it at some point
extended_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.1",
"192.0.2.2",
"192.0.2.3" ])
@@ -1062,90 +1059,90 @@ class SessionTest(SessionTestBase):
# Sanity check, make sure original data is really there before updates
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# Add two rrs
- self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_full_handle_result(Rcode.NOERROR, [ self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
extended_a_rrset)
# Adding the same RRsets should not make a difference.
- self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_full_handle_result(Rcode.NOERROR, [ self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
extended_a_rrset)
# Now delete those two, and we should end up with the original RRset
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# 'Deleting' them again should make no difference
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# But deleting the entire rrset, independent of its contents, should
# work
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Check that if we update the SOA, it is updated to our value
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_soa2 ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
self.rrset_update_soa2)
def test_glue_deletions(self):
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("ns.sub.example.org."),
- RRType.A())
+ RRType.A)
# See that we can delete glue
rrset_delete_glue = create_rrset("ns.sub.example.org.",
- RRClass.ANY(),
- RRType.A(),
+ RRClass.ANY,
+ RRType.A,
0)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset_delete_glue ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("ns.sub.example.org."),
- RRType.A())
+ RRType.A)
# Check that we don't accidentally delete a delegation if we
# try to delete non-existent glue
rrset_delete_nonexistent_glue = create_rrset("foo.sub.example.org.",
- RRClass.ANY(),
- RRType.A(),
+ RRClass.ANY,
+ RRType.A,
0)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset_delete_nonexistent_glue ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
def test_update_add_new_data(self):
'''
@@ -1154,26 +1151,26 @@ class SessionTest(SessionTestBase):
# Add data at a completely new name
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new.example.org"),
- RRType.A())
- rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A(),
+ RRType.A)
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1", "192.0.2.2" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new.example.org"),
- RRType.A(),
+ RRType.A,
rrset)
# Also try a name where data is present, but none of this
# specific type
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("new.example.org"),
- RRType.TXT())
- rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT(),
+ RRType.TXT)
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT,
3600, [ "foo" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new.example.org"),
- RRType.TXT(),
+ RRType.TXT,
rrset)
def test_update_add_new_data_interspersed(self):
@@ -1186,36 +1183,36 @@ class SessionTest(SessionTestBase):
# Add data at a completely new name
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new_a.example.org"),
- RRType.A())
+ RRType.A)
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new_txt.example.org"),
- RRType.TXT())
+ RRType.TXT)
- rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1" ])
- rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT(),
+ rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT,
3600, [ "foo" ])
- rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.2" ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset1, rrset2, rrset3 ])
# The update should have merged rrset1 and rrset3
rrset_merged = create_rrset("new_a.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.1", "192.0.2.2" ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new_a.example.org"),
- RRType.A(),
+ RRType.A,
rrset_merged)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new_txt.example.org"),
- RRType.TXT(),
+ RRType.TXT,
rrset2)
def test_update_delete_name(self):
@@ -1227,21 +1224,21 @@ class SessionTest(SessionTestBase):
# First check it is there
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Delete the entire name
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Should still be gone after pointless second delete
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_apex_special_cases(self):
'''
@@ -1251,23 +1248,23 @@ class SessionTest(SessionTestBase):
# the original SOA
orig_soa_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200" ])
# At some point, the SOA SERIAL will be auto-incremented
incremented_soa_rrset_01 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1235 3600 1800 2419200 7200" ])
incremented_soa_rrset_02 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1236 3600 1800 2419200 7200" ])
# We will delete some of the NS records
orig_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.NS(), 3600,
+ RRType.NS, 3600,
[ "ns1.example.org.",
"ns2.example.org.",
"ns3.example.org." ])
@@ -1275,48 +1272,48 @@ class SessionTest(SessionTestBase):
# Sanity check, make sure original data is really there before updates
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
# We will delete the MX record later in this test, so let's make
# sure that it exists (we do not care about its value)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
# Check that we cannot delete the SOA record by direct deletion
# both by name+type and by full rrset
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_soa_apex,
self.rrset_update_soa_del ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
incremented_soa_rrset_01)
# If we delete everything at the apex, the SOA and NS rrsets should be
# untouched (but serial will be incremented)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name_apex ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
incremented_soa_rrset_02)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
# but the MX should be gone
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
# Deleting the NS rrset by name and type only, it should also be left
# untouched
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_ns_apex ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
def test_update_apex_special_case_ns_rrset(self):
@@ -1325,28 +1322,28 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
# When we are done, we should have a reduced NS rrset
short_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.NS(), 3600,
+ RRType.NS, 3600,
[ "ns3.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_ns ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
short_ns_rrset)
def test_update_apex_special_case_ns_rrset2(self):
# If we add new NS records, then delete all existing ones, it
# should not keep any
self.__initialize_update_rrsets()
- new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS(), 3600,
- [ "newns1.example.org", "newns2.example.org" ])
+ new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS, 3600,
+ [ "newns1.example.org.", "newns2.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ new_ns,
self.rrset_update_del_rrset_ns ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
new_ns)
def test_update_delete_normal_rrset_at_apex(self):
@@ -1358,12 +1355,12 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.MX())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.MX)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_mx ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
def test_update_add_then_delete_rrset(self):
# If we add data, then delete the whole rrset, added data should
@@ -1371,13 +1368,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_a,
self.rrset_update_del_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_add_then_delete_name(self):
# If we add data, then delete the entire name, added data should
@@ -1385,13 +1382,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_a,
self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_delete_then_add_rrset(self):
# If we delete an entire rrset, then add something there again,
@@ -1399,13 +1396,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset,
self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.rrset_update_a)
def test_update_delete_then_add_rrset(self):
@@ -1414,13 +1411,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name,
self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.rrset_update_a)
def test_update_cname_special_cases(self):
@@ -1428,31 +1425,31 @@ class SessionTest(SessionTestBase):
# Sanity check
orig_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "www.example.org." ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
orig_cname_rrset)
# If we try to add data where a cname is preset
- rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A(),
+ rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
orig_cname_rrset)
# But updating the cname itself should work
new_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "mail.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ new_cname_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
new_cname_rrset)
self.__initialize_update_rrsets()
@@ -1461,27 +1458,27 @@ class SessionTest(SessionTestBase):
# present should do nothing either
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
new_cname_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "mail.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ new_cname_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
def test_update_bad_class(self):
- rrset = create_rrset("example.org.", RRClass.CH(), RRType.TXT(), 0,
+ rrset = create_rrset("example.org.", RRClass.CH, RRType.TXT, 0,
[ "foo" ])
- self.check_full_handle_result(Rcode.FORMERR(), [ rrset ])
+ self.check_full_handle_result(Rcode.FORMERR, [ rrset ])
def test_uncaught_exception(self):
def my_exc():
raise Exception("foo")
self._session._UpdateSession__update_soa = my_exc
- self.assertEqual(Rcode.SERVFAIL().to_text(),
+ self.assertEqual(Rcode.SERVFAIL.to_text(),
self._session._UpdateSession__do_update().to_text())
class SessionACLTest(SessionTestBase):
@@ -1527,7 +1524,7 @@ class SessionACLTest(SessionTestBase):
self._datasrc_client,
acl_map))
self.assertEqual((UPDATE_ERROR, None, None), session.handle())
- self.check_response(session.get_message(), Rcode.REFUSED())
+ self.check_response(session.get_message(), Rcode.REFUSED)
# If the message contains TSIG, it should match the ACCEPT
# ACL entry, and the request should be granted.
diff --git a/src/lib/python/isc/ddns/tests/zone_config_tests.py b/src/lib/python/isc/ddns/tests/zone_config_tests.py
index 7facb48..0ada906 100644
--- a/src/lib/python/isc/ddns/tests/zone_config_tests.py
+++ b/src/lib/python/isc/ddns/tests/zone_config_tests.py
@@ -26,7 +26,7 @@ import socket
# Some common test parameters
TEST_ZONE_NAME = Name('example.org')
TEST_SECONDARY_ZONE_NAME = Name('example.com')
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
TEST_ACL_CONTEXT = isc.acl.dns.RequestContext(
socket.getaddrinfo("192.0.2.1", 1234, 0, socket.SOCK_DGRAM,
@@ -88,12 +88,12 @@ class ZoneConfigTest(unittest.TestCase):
# zone class doesn't match (but zone name matches)
self.__datasrc_client.set_find_result(DataSourceClient.SUCCESS)
zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
- RRClass.CH(), self.__datasrc_client)
+ RRClass.CH, self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
# similar to the previous case, but also in the secondary list
zconfig = ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
- RRClass.CH(), self.__datasrc_client)
+ RRClass.CH, self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
@@ -107,7 +107,7 @@ class ZoneConfigTest(unittest.TestCase):
zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
(Name('example'), TEST_RRCLASS),
(Name('sub.example.org'), TEST_RRCLASS),
- (TEST_ZONE_NAME, RRClass.CH())},
+ (TEST_ZONE_NAME, RRClass.CH)},
TEST_RRCLASS, self.__datasrc_client)
self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
@@ -134,7 +134,7 @@ class ACLConfigTest(unittest.TestCase):
# 'All reject' ACL will still apply for any other zones
acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
- acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
# Test with a map with a few more ACL entries. Should be nothing
@@ -143,14 +143,14 @@ class ACLConfigTest(unittest.TestCase):
REQUEST_LOADER.load([{"action": "REJECT"}]),
(TEST_ZONE_NAME, TEST_RRCLASS):
REQUEST_LOADER.load([{"action": "ACCEPT"}]),
- (TEST_ZONE_NAME, RRClass.CH()):
+ (TEST_ZONE_NAME, RRClass.CH):
REQUEST_LOADER.load([{"action": "DROP"}])}
self.__zconfig.set_update_acl_map(acl_map)
acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
- acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH)
self.assertEqual(DROP, acl.execute(TEST_ACL_CONTEXT))
if __name__ == "__main__":
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
index 97ff6e6..c8b9c7a 100644
--- a/src/lib/python/isc/log_messages/Makefile.am
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -1,7 +1,7 @@
SUBDIRS = work
EXTRA_DIST = __init__.py
-EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += init_messages.py
EXTRA_DIST += cmdctl_messages.py
EXTRA_DIST += ddns_messages.py
EXTRA_DIST += stats_messages.py
@@ -18,9 +18,10 @@ EXTRA_DIST += loadzone_messages.py
EXTRA_DIST += server_common_messages.py
EXTRA_DIST += dbutil_messages.py
EXTRA_DIST += msgq_messages.py
+EXTRA_DIST += pycc_messages.py
CLEANFILES = __init__.pyc
-CLEANFILES += bind10_messages.pyc
+CLEANFILES += init_messages.pyc
CLEANFILES += cmdctl_messages.pyc
CLEANFILES += ddns_messages.pyc
CLEANFILES += stats_messages.pyc
@@ -37,6 +38,7 @@ CLEANFILES += loadzone_messages.pyc
CLEANFILES += server_common_messages.pyc
CLEANFILES += dbutil_messages.pyc
CLEANFILES += msgq_messages.pyc
+CLEANFILES += pycc_messages.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
deleted file mode 100644
index 68ce94c..0000000
--- a/src/lib/python/isc/log_messages/bind10_messages.py
+++ /dev/null
@@ -1 +0,0 @@
-from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/init_messages.py b/src/lib/python/isc/log_messages/init_messages.py
new file mode 100644
index 0000000..15288bf
--- /dev/null
+++ b/src/lib/python/isc/log_messages/init_messages.py
@@ -0,0 +1 @@
+from work.init_messages import *
diff --git a/src/lib/python/isc/log_messages/pycc_messages.py b/src/lib/python/isc/log_messages/pycc_messages.py
new file mode 100644
index 0000000..77b3804
--- /dev/null
+++ b/src/lib/python/isc/log_messages/pycc_messages.py
@@ -0,0 +1 @@
+from work.pycc_messages import *
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 46bb00b..1f75256 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -302,12 +302,12 @@ class NotifyOut:
format_zone_str(zone_name, zone_class))
return []
- result, ns_rrset, _ = finder.find(zone_name, RRType.NS())
+ result, ns_rrset, _ = finder.find(zone_name, RRType.NS)
if result is not finder.SUCCESS or ns_rrset is None:
logger.warn(NOTIFY_OUT_ZONE_NO_NS,
format_zone_str(zone_name, zone_class))
return []
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result is not finder.SUCCESS or soa_rrset is None or \
soa_rrset.get_rdata_count() != 1:
logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
@@ -323,11 +323,11 @@ class NotifyOut:
ns_result, ns_finder = ds_client.find_zone(ns_name)
if ns_result is DataSourceClient.SUCCESS or \
ns_result is DataSourceClient.PARTIALMATCH:
- result, rrset, _ = ns_finder.find(ns_name, RRType.A())
+ result, rrset, _ = ns_finder.find(ns_name, RRType.A)
if result is ns_finder.SUCCESS and rrset is not None:
addrs.extend([a.to_text() for a in rrset.get_rdata()])
- result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA())
+ result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA)
if result is ns_finder.SUCCESS and rrset is not None:
addrs.extend([aaaa.to_text()
for aaaa in rrset.get_rdata()])
@@ -509,10 +509,10 @@ class NotifyOut:
msg = Message(Message.RENDER)
qid = random.randint(0, 0xFFFF)
msg.set_qid(qid)
- msg.set_opcode(Opcode.NOTIFY())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.NOTIFY)
+ msg.set_rcode(Rcode.NOERROR)
msg.set_header_flag(Message.HEADERFLAG_AA)
- msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+ msg.add_question(Question(zone_name, zone_class, RRType.SOA))
msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
zone_class))
return msg, qid
@@ -531,7 +531,7 @@ class NotifyOut:
zone_name.to_text() + '/' +
zone_class.to_text() + ' not found')
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result is not finder.SUCCESS or soa_rrset is None or \
soa_rrset.get_rdata_count() != 1:
raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
@@ -566,7 +566,7 @@ class NotifyOut:
Name(zone_notify_info.zone_name).to_text())
return _BAD_QUERY_NAME
- if msg.get_opcode() != Opcode.NOTIFY():
+ if msg.get_opcode() != Opcode.NOTIFY:
logger.warn(NOTIFY_OUT_REPLY_BAD_OPCODE, from_addr[0],
from_addr[1], msg.get_opcode().to_text())
return _BAD_OPCODE
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 60c8f2f..ad1107f 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -377,7 +377,7 @@ class TestNotifyOut(unittest.TestCase):
def test_get_notify_slaves_from_ns(self):
records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
- RRClass.IN())
+ RRClass.IN)
self.assertEqual(6, len(records))
self.assertEqual('8:8::8:8', records[5])
self.assertEqual('7.7.7.7', records[4])
@@ -387,7 +387,7 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual('3.3.3.3', records[0])
records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
- RRClass.IN())
+ RRClass.IN)
self.assertEqual(3, len(records))
self.assertEqual('5:5::5:5', records[2])
self.assertEqual('4:4::4:4', records[1])
@@ -396,19 +396,19 @@ class TestNotifyOut(unittest.TestCase):
def test_get_notify_slaves_from_ns_unusual(self):
self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nons.example'), RRClass.IN()))
+ Name('nons.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nosoa.example'), RRClass.IN()))
+ Name('nosoa.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('multisoa.example'), RRClass.IN()))
+ Name('multisoa.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nosuchzone.example'), RRClass.IN()))
+ Name('nosuchzone.example'), RRClass.IN))
# This will cause failure in getting access to the data source.
self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('example.com'), RRClass.IN()))
+ Name('example.com'), RRClass.IN))
def test_init_notify_out(self):
self._notify._init_notify_out(self._db_file)
diff --git a/src/lib/python/isc/server_common/dns_tcp.py b/src/lib/python/isc/server_common/dns_tcp.py
index 3b78d0d..9ce94fe 100644
--- a/src/lib/python/isc/server_common/dns_tcp.py
+++ b/src/lib/python/isc/server_common/dns_tcp.py
@@ -248,7 +248,7 @@ class DNSTCPContext:
ClientFormatter(self.__remote_addr),
self.__send_marker, total_len)
return self.SENDING
- logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_ERROR,
+ logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_FAILED,
ClientFormatter(self.__remote_addr),
self.__send_marker, total_len, ex)
self.__sock.close()
diff --git a/src/lib/python/isc/server_common/server_common_messages.mes b/src/lib/python/isc/server_common/server_common_messages.mes
index bd4e3cc..f22ce65 100644
--- a/src/lib/python/isc/server_common/server_common_messages.mes
+++ b/src/lib/python/isc/server_common/server_common_messages.mes
@@ -27,7 +27,7 @@ transmitted over a TCP connection, possibly after multiple send
operations. The destination address and the total size of the message
(including the 2-byte length field) are shown in the log message.
-% PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4
+% PYSERVER_COMMON_DNS_TCP_SEND_FAILED failed to send TCP message to %1 (%2/%3 bytes sent): %4
A DNS message has been attempted to be sent out over a TCP connection,
but it failed due to some network error. Although it's not expected
to happen too often, it can still happen for various reasons. The
diff --git a/src/lib/python/isc/statistics/tests/counters_test.py b/src/lib/python/isc/statistics/tests/counters_test.py
index 550fd1f..2d791c4 100644
--- a/src/lib/python/isc/statistics/tests/counters_test.py
+++ b/src/lib/python/isc/statistics/tests/counters_test.py
@@ -139,9 +139,9 @@ class TestBasicMethods(unittest.TestCase):
counters._get_counter(self.counters._statistics._data,
counter_name),
concurrency * number)
- self.assertGreater(
+ self.assertGreaterEqual(
counters._get_counter(self.counters._statistics._data,
- timer_name), 0)
+ timer_name), 0.0)
def test_concat(self):
# only strings
@@ -200,7 +200,7 @@ class BaseTestCounters():
if name.find('time_to_') == 0:
self.counters.start_timer(*args)
self.counters.stop_timer(*args)
- self.assertGreater(self.counters.get(*args), 0)
+ self.assertGreaterEqual(self.counters.get(*args), 0.0)
sec = self.counters.get(*args)
for zone_str in (self._entire_server, TEST_ZONE_NAME_STR):
isc.cc.data.set(self._statistics_data,
diff --git a/src/lib/python/isc/sysinfo/sysinfo.py b/src/lib/python/isc/sysinfo/sysinfo.py
index 8e4610c..099ac89 100644
--- a/src/lib/python/isc/sysinfo/sysinfo.py
+++ b/src/lib/python/isc/sysinfo/sysinfo.py
@@ -44,7 +44,7 @@ class SysInfo:
self._net_stats = 'Unknown\n'
self._net_connections = 'Unknown\n'
- # The following are Linux speicific, and should eventually be removed
+ # The following are Linux specific, and should eventually be removed
# from this level; for now we simply default to None (so they won't
# be printed)
self._platform_distro = None
@@ -162,9 +162,12 @@ class SysInfoPOSIX(SysInfo):
u = os.uname()
self._platform_name = u[0]
+ self._hostname = u[1]
self._platform_version = u[2]
self._platform_machine = u[4]
+ self._loadavg = os.getloadavg()
+
class SysInfoLinux(SysInfoPOSIX):
"""Linux implementation of the SysInfo class.
See the SysInfo class documentation for more information.
@@ -322,8 +325,8 @@ class SysInfoBSD(SysInfoPOSIX):
except (subprocess.CalledProcessError, OSError):
self._net_connections = 'Warning: "netstat -nr" command failed.\n'
-class SysInfoOpenBSD(SysInfoBSD):
- """OpenBSD implementation of the SysInfo class.
+class SysInfoNetBSD(SysInfoBSD):
+ """NetBSD and OpenBSD implementation of the SysInfo class.
See the SysInfo class documentation for more information.
"""
def __init__(self):
@@ -499,8 +502,8 @@ def SysInfoFromFactory():
osname = platform.system()
if osname == 'Linux':
return SysInfoLinux()
- elif osname == 'OpenBSD':
- return SysInfoOpenBSD()
+ elif (osname == 'NetBSD') or (osname == 'OpenBSD'):
+ return SysInfoNetBSD()
elif osname == 'FreeBSD':
return SysInfoFreeBSD()
elif osname == 'Darwin':
@@ -508,4 +511,4 @@ def SysInfoFromFactory():
elif osname == 'BIND10Testcase':
return SysInfoTestcase()
else:
- return SysInfo()
+ return SysInfoPOSIX()
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
index 7eac772..eb3da28 100644
--- a/src/lib/python/isc/testutils/rrset_utils.py
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -30,7 +30,7 @@ def rrsets_equal(a, b):
a.get_class() == b.get_class() and \
a.get_type() == b.get_type() and \
a.get_ttl() == b.get_ttl() and \
- (a.get_type() == RRType.RRSIG() or
+ (a.get_type() == RRType.RRSIG or
sorted(a.get_rdata()) == sorted(b.get_rdata()))
# The following are short cut utilities to create an RRset of a specific
@@ -38,25 +38,25 @@ def rrsets_equal(a, b):
# tests, so we define default values for them for convenience.
def create_a(name, address, ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ rrset = RRset(name, RRClass.IN, RRType.A, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.A, RRClass.IN, address))
return rrset
def create_aaaa(name, address, ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+ rrset = RRset(name, RRClass.IN, RRType.AAAA, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.AAAA, RRClass.IN, address))
return rrset
def create_ns(nsname, name=Name('example.com'), ttl=3600):
'''For convenience we use a default name often used as a zone name'''
- rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+ rrset = RRset(name, RRClass.IN, RRType.NS, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.NS, RRClass.IN, nsname))
return rrset
-def create_cname(target='target.example.com', name=Name('example.com'),
+def create_cname(target='target.example.com.', name=Name('example.com'),
ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+ rrset = RRset(name, RRClass.IN, RRType.CNAME, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.CNAME, RRClass.IN, target))
return rrset
def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
@@ -67,16 +67,16 @@ def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
The RDATA will be filled with specified length of all-0 data.
'''
- rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
- rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+ rrset = RRset(name, RRClass.IN, type, RRTTL(ttl))
+ rrset.add_rdata(Rdata(type, RRClass.IN, '\\# ' +
str(rdlen) + ' ' + '00' * rdlen))
return rrset
def create_soa(serial, name=Name('example.com'), ttl=3600):
'''For convenience we use a default name often used as a zone name'''
- rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+ rrset = RRset(name, RRClass.IN, RRType.SOA, RRTTL(ttl))
rdata_str = 'master.example.com. admin.example.com. ' + \
str(serial) + ' 3600 1800 2419200 7200'
- rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+ rrset.add_rdata(Rdata(RRType.SOA, RRClass.IN, rdata_str))
return rrset
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index 8d0bb08..4e06eea 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -146,12 +146,12 @@ class Diff:
"""
# first add or delete must be of type SOA
if len(buf) == 0 and\
- rr.get_type() != isc.dns.RRType.SOA():
+ rr.get_type() != isc.dns.RRType.SOA:
raise ValueError("First " + operation +
" in single update mode must be of type SOA")
# And later adds or deletes may not
elif len(buf) != 0 and\
- rr.get_type() == isc.dns.RRType.SOA():
+ rr.get_type() == isc.dns.RRType.SOA:
raise ValueError("Multiple SOA records in single " +
"update mode " + operation)
buf.append((operation, rr))
@@ -238,8 +238,8 @@ class Diff:
'''A helper routine to identify whether two RRsets are of the
same 'type'. For RRSIGs we should consider type covered, too.
'''
- if rrset1.get_type() != isc.dns.RRType.RRSIG() or \
- rrset2.get_type != isc.dns.RRType.RRSIG():
+ if rrset1.get_type() != isc.dns.RRType.RRSIG or \
+ rrset2.get_type != isc.dns.RRType.RRSIG:
return rrset1.get_type() == rrset2.get_type()
# RR type of the both RRsets is RRSIG. Compare type covered.
# We know they have exactly one RDATA.
@@ -425,7 +425,7 @@ class Diff:
return a.get_name() == b.get_name() and\
a.get_type() == b.get_type() and\
a.get_rdata()[0] == b.get_rdata()[0]
- if rr.get_type() == isc.dns.RRType.SOA():
+ if rr.get_type() == isc.dns.RRType.SOA:
return buf
else:
return [ op for op in buf if not same_rr(op[1], rr)]
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index f013cd5..bb83340 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -57,8 +57,8 @@ class DiffTest(unittest.TestCase):
self.__find_all_name = None
self.__find_all_options = None
# Some common values
- self.__rrclass = RRClass.IN()
- self.__type = RRType.A()
+ self.__rrclass = RRClass.IN
+ self.__type = RRType.A
self.__ttl = RRTTL(3600)
# And RRsets
# Create two valid rrsets
@@ -81,27 +81,27 @@ class DiffTest(unittest.TestCase):
# Also create a few other (valid) rrsets
# A SOA record
self.__rrset_soa = RRset(Name('example.org.'), self.__rrclass,
- RRType.SOA(), RRTTL(3600))
- self.__rrset_soa.add_rdata(Rdata(RRType.SOA(), self.__rrclass,
+ RRType.SOA, RRTTL(3600))
+ self.__rrset_soa.add_rdata(Rdata(RRType.SOA, self.__rrclass,
"ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"))
# A few single-rr rrsets that together would for a multi-rr rrset
self.__rrset3 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset3.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "one"))
+ RRType.TXT, self.__ttl)
+ self.__rrset3.add_rdata(Rdata(RRType.TXT, self.__rrclass, "one"))
self.__rrset4 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset4.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "two"))
+ RRType.TXT, self.__ttl)
+ self.__rrset4.add_rdata(Rdata(RRType.TXT, self.__rrclass, "two"))
self.__rrset5 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset5.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "three"))
+ RRType.TXT, self.__ttl)
+ self.__rrset5.add_rdata(Rdata(RRType.TXT, self.__rrclass, "three"))
self.__rrset6 = RRset(Name('d.example.org.'), self.__rrclass,
- RRType.A(), self.__ttl)
- self.__rrset6.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.1"))
+ RRType.A, self.__ttl)
+ self.__rrset6.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.1"))
self.__rrset7 = RRset(Name('d.example.org.'), self.__rrclass,
- RRType.A(), self.__ttl)
- self.__rrset7.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ RRType.A, self.__ttl)
+ self.__rrset7.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.2"))
def __mock_compact(self):
"""
@@ -316,7 +316,7 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff.add_data, self.__rrset2)
self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
self.assertRaises(ValueError, diff.find, Name('foo.example.org.'),
- RRType.A())
+ RRType.A)
self.assertRaises(ValueError, diff.find_all, Name('foo.example.org.'))
diff.apply = orig_apply
self.assertRaises(ValueError, diff.apply)
@@ -435,9 +435,9 @@ class DiffTest(unittest.TestCase):
Test a wrong class of rrset is rejected.
"""
diff = Diff(self, Name('example.org.'))
- rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ rrset = RRset(Name('a.example.org.'), RRClass.CH, RRType.NS,
self.__ttl)
- rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ rrset.add_rdata(Rdata(RRType.NS, RRClass.CH, 'ns.example.org.'))
self.assertRaises(ValueError, diff.add_data, rrset)
self.assertRaises(ValueError, diff.delete_data, rrset)
@@ -517,14 +517,14 @@ class DiffTest(unittest.TestCase):
'''
diff = Diff(self, Name('example.org.'))
rrsig1 = RRset(Name('example.org'), self.__rrclass,
- RRType.RRSIG(), RRTTL(3600))
- rrsig1.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ RRType.RRSIG, RRTTL(3600))
+ rrsig1.add_rdata(Rdata(RRType.RRSIG, self.__rrclass,
'A 5 3 3600 20000101000000 20000201000000 ' +
'0 example.org. FAKEFAKEFAKE'))
diff.add_data(rrsig1)
rrsig2 = RRset(Name('example.org'), self.__rrclass,
- RRType.RRSIG(), RRTTL(1800))
- rrsig2.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ RRType.RRSIG, RRTTL(1800))
+ rrsig2.add_rdata(Rdata(RRType.RRSIG, self.__rrclass,
'AAAA 5 3 3600 20000101000000 20000201000000 ' +
'1 example.org. FAKEFAKEFAKE'))
diff.add_data(rrsig2)
@@ -558,7 +558,7 @@ class DiffTest(unittest.TestCase):
'''
diff_multi = Diff(self, Name('example.org.'), single_update_mode=False)
self.assertRaises(ValueError, diff_multi.find_updated,
- Name('example.org.'), RRType.A())
+ Name('example.org.'), RRType.A)
self.assertRaises(ValueError, diff_multi.find_all_updated,
Name('example.org.'))
@@ -571,12 +571,12 @@ class DiffTest(unittest.TestCase):
'''
# full rrset for A (to check compact())
- txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT(),
+ txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT,
RRTTL(3600))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "one"))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "two"))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "three"))
- a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A(),
+ a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A,
RRTTL(3600))
a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.1"))
a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.2"))
@@ -680,7 +680,7 @@ class DiffTest(unittest.TestCase):
def test_find(self):
diff = Diff(self, Name('example.org.'))
name = Name('www.example.org.')
- rrtype = RRType.A()
+ rrtype = RRType.A
self.assertFalse(self.__find_called)
self.assertEqual(None, self.__find_name)
@@ -698,7 +698,7 @@ class DiffTest(unittest.TestCase):
def test_find_options(self):
diff = Diff(self, Name('example.org.'))
name = Name('foo.example.org.')
- rrtype = RRType.TXT()
+ rrtype = RRType.TXT
options = ZoneFinder.NO_WILDCARD
self.assertEqual("find_return", diff.find(name, rrtype, options))
@@ -998,8 +998,8 @@ class DiffTest(unittest.TestCase):
# Add a second rr with different type at same name
add_rrset = RRset(self.__rrset3.get_name(), self.__rrclass,
- RRType.A(), self.__ttl)
- add_rrset.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ RRType.A, self.__ttl)
+ add_rrset.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.2"))
diff.add_data(add_rrset)
self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
@@ -1131,8 +1131,8 @@ class DiffTest(unittest.TestCase):
self.assertTrue(isinstance(collection, self.Collection))
# The collection is just the mock from above, so this doesn't do much
# testing, but we check that the mock got through and didn't get hurt.
- self.assertIsNone(collection.find(Name('example.org'), RRClass.IN(),
- RRType.SOA()))
+ self.assertIsNone(collection.find(Name('example.org'), RRClass.IN,
+ RRType.SOA))
if __name__ == "__main__":
isc.log.init("bind10")
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 7eae6fe..8d4ae58 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -609,7 +609,7 @@ SERVFAIL:
if (category == ResponseClassifier::RCODE) {
// Special case as this message takes two arguments.
- LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERROR).
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_RETURNED).
arg(questionText(question_)).arg(rcode);
} else {
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index 6447082..c89dedb 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -133,7 +133,7 @@ A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-% RESLIB_RCODE_ERROR response to query for <%1> returns RCODE of %2
+% RESLIB_RCODE_RETURNED response to query for <%1> returns RCODE of %2
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
diff --git a/src/lib/resolve/tests/response_classifier_unittest.cc b/src/lib/resolve/tests/response_classifier_unittest.cc
index 23c8666..30aeabb 100644
--- a/src/lib/resolve/tests/response_classifier_unittest.cc
+++ b/src/lib/resolve/tests/response_classifier_unittest.cc
@@ -131,9 +131,9 @@ public:
// ... the CNAME records
rrs_in_cname_www1->addRdata(ConstRdataPtr(
- new CNAME("www.example.com")));
+ new CNAME("www.example.com.")));
rrs_in_cname_www2->addRdata(ConstRdataPtr(
- new CNAME("www1.example.com")));
+ new CNAME("www1.example.com.")));
}
Message msg_a; // Pointer to message in RENDER state
diff --git a/src/lib/server_common/portconfig.cc b/src/lib/server_common/portconfig.cc
index 530c919..b214ef5 100644
--- a/src/lib/server_common/portconfig.cc
+++ b/src/lib/server_common/portconfig.cc
@@ -152,7 +152,7 @@ installListenAddresses(const AddressList& new_addresses,
throw;
} catch (const exception& e) {
// Any other kind of exception is fatal. It might mean we are in
- // inconsistent state with the boss/socket creator, so we abort
+ // inconsistent state with the b10-init/socket creator, so we abort
// to make sure it doesn't last.
LOG_FATAL(logger, SRVCOMM_EXCEPTION_ALLOC).arg(e.what());
abort();
diff --git a/src/lib/server_common/portconfig.h b/src/lib/server_common/portconfig.h
index 0795728..7213e09 100644
--- a/src/lib/server_common/portconfig.h
+++ b/src/lib/server_common/portconfig.h
@@ -92,8 +92,9 @@ parseAddresses(isc::data::ConstElementPtr addresses,
/// but removes all the sockets it listened on. One of the exceptions is
/// propagated.
///
-/// The ports are requested from the socket creator through boss. Therefore
-/// you need to initialize the SocketRequestor before using this function.
+/// The ports are requested from the socket creator through b10-init.
+/// Therefore you need to initialize the SocketRequestor before using this
+/// function.
///
/// \param new_addresses are the addresses you want to listen on.
/// \param address_store is the place you store your current addresses. It is
@@ -107,7 +108,7 @@ parseAddresses(isc::data::ConstElementPtr addresses,
///
/// \throw asiolink::IOError when initialization or closing of socket fails.
/// \throw isc::server_common::SocketRequestor::Socket error when the
-/// boss/socket creator doesn't want to give us the socket.
+/// b10-init/socket creator doesn't want to give us the socket.
/// \throw std::bad_alloc when allocation fails.
/// \throw isc::InvalidOperation when the function is called and the
/// SocketRequestor isn't initialized yet.
diff --git a/src/lib/server_common/socket_request.cc b/src/lib/server_common/socket_request.cc
index e471ad0..981930d 100644
--- a/src/lib/server_common/socket_request.cc
+++ b/src/lib/server_common/socket_request.cc
@@ -34,21 +34,21 @@ namespace server_common {
namespace {
SocketRequestor* requestor(NULL);
-// Before the boss process calls send_fd, it first sends this
+// Before the b10-init process calls send_fd, it first sends this
// string to indicate success, followed by the file descriptor
const std::string& CREATOR_SOCKET_OK() {
static const std::string str("1\n");
return (str);
}
-// Before the boss process calls send_fd, it sends this
+// Before the b10-init process calls send_fd, it sends this
// string to indicate failure. It will not send a file descriptor.
const std::string& CREATOR_SOCKET_UNAVAILABLE() {
static const std::string str("0\n");
return (str);
}
-// The name of the ccsession command to request a socket from boss
+// The name of the ccsession command to request a socket from b10-init
// (the actual format of command and response are hardcoded in their
// respective methods)
const std::string& REQUEST_SOCKET_COMMAND() {
@@ -56,7 +56,7 @@ const std::string& REQUEST_SOCKET_COMMAND() {
return (str);
}
-// The name of the ccsession command to tell boss we no longer need
+// The name of the ccsession command to tell b10-init we no longer need
// a socket (the actual format of command and response are hardcoded
// in their respective methods)
const std::string& RELEASE_SOCKET_COMMAND() {
@@ -69,7 +69,7 @@ const size_t SOCKET_ERROR_CODE = 2;
const size_t SHARE_ERROR_CODE = 3;
// A helper converter from numeric protocol ID to the corresponding string.
-// used both for generating a message for the boss process and for logging.
+// used both for generating a message for the b10-init process and for logging.
inline const char*
protocolString(SocketRequestor::Protocol protocol) {
switch (protocol) {
@@ -84,7 +84,7 @@ protocolString(SocketRequestor::Protocol protocol) {
// Creates the cc session message to request a socket.
// The actual command format is hardcoded, and should match
-// the format as read in bind10_src.py.in
+// the format as read in b10-init.py.in
isc::data::ConstElementPtr
createRequestSocketMessage(SocketRequestor::Protocol protocol,
const std::string& address, uint16_t port,
@@ -125,7 +125,7 @@ createReleaseSocketMessage(const std::string& token) {
return (isc::config::createCommand(RELEASE_SOCKET_COMMAND(), release));
}
-// Checks and parses the response receive from Boss
+// Checks and parses the response receive from Init
// If successful, token and path will be set to the values found in the
// answer.
// If the response was an error response, or does not contain the
@@ -158,7 +158,7 @@ readRequestSocketAnswer(isc::data::ConstElementPtr recv_msg,
path = answer->get("path")->stringValue();
}
-// Connect to the domain socket that has been received from Boss.
+// Connect to the domain socket that has been received from Init.
// (i.e. the one that is used to pass created sockets over).
//
// This should only be called if the socket had not been connected to
@@ -211,14 +211,14 @@ createFdShareSocket(const std::string& path) {
// \return the socket fd that has been read
int
getSocketFd(const std::string& token, int sock_pass_fd) {
- // Tell the boss the socket token.
+ // Tell b10-init the socket token.
const std::string token_data = token + "\n";
if (!isc::util::io::write_data(sock_pass_fd, token_data.c_str(),
token_data.size())) {
isc_throw(SocketRequestor::SocketError, "Error writing socket token");
}
- // Boss first sends some data to signal that getting the socket
+ // Init first sends some data to signal that getting the socket
// from its cache succeeded
char status[3]; // We need a space for trailing \0, hence 3
memset(status, 0, 3);
@@ -226,7 +226,7 @@ getSocketFd(const std::string& token, int sock_pass_fd) {
isc_throw(SocketRequestor::SocketError,
"Error reading status code while requesting socket");
}
- // Actual status value hardcoded by boss atm.
+ // Actual status value hardcoded by b10-init atm.
if (CREATOR_SOCKET_UNAVAILABLE() == status) {
isc_throw(SocketRequestor::SocketError,
"CREATOR_SOCKET_UNAVAILABLE returned");
@@ -258,7 +258,7 @@ getSocketFd(const std::string& token, int sock_pass_fd) {
}
// This implementation class for SocketRequestor uses
-// a CC session for communication with the boss process,
+// a CC session for communication with the b10-init process,
// and fd_share to read out the socket(s).
// Since we only use a reference to the session, it must never
// be closed during the lifetime of this class
@@ -300,10 +300,10 @@ public:
share_name.empty() ? app_name_ :
share_name);
- // Send it to boss
- const int seq = session_.group_sendmsg(request_msg, "Boss");
+ // Send it to b10-init
+ const int seq = session_.group_sendmsg(request_msg, "Init");
- // Get the answer from the boss.
+ // Get the answer from b10-init.
// Just do a blocking read, we can't really do much anyway
isc::data::ConstElementPtr env, recv_msg;
if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
@@ -330,12 +330,12 @@ public:
const isc::data::ConstElementPtr release_msg =
createReleaseSocketMessage(token);
- // Send it to boss
- const int seq = session_.group_sendmsg(release_msg, "Boss");
+ // Send it to b10-init
+ const int seq = session_.group_sendmsg(release_msg, "Init");
LOG_DEBUG(logger, DBGLVL_TRACE_DETAIL, SOCKETREQUESTOR_RELEASESOCKET).
arg(token);
- // Get the answer from the boss.
+ // Get the answer from b10-init.
// Just do a blocking read, we can't really do much anyway
isc::data::ConstElementPtr env, recv_msg;
if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
diff --git a/src/lib/server_common/tests/portconfig_unittest.cc b/src/lib/server_common/tests/portconfig_unittest.cc
index 0c971ee..48d69ba 100644
--- a/src/lib/server_common/tests/portconfig_unittest.cc
+++ b/src/lib/server_common/tests/portconfig_unittest.cc
@@ -330,8 +330,8 @@ TEST_F(InstallListenAddressesDeathTest, inconsistent) {
}
}
-// If we are unable to tell the boss we closed a socket, we abort, as we are
-// not consistent with the boss most probably.
+// If we are unable to tell the b10-init we closed a socket, we abort, as we
+// are not consistent with b10-init most probably.
TEST_F(InstallListenAddressesDeathTest, cantClose) {
if (!isc::util::unittests::runningOnValgrind()) {
installListenAddresses(valid_, store_, dnss_);
diff --git a/src/lib/server_common/tests/socket_requestor_test.cc b/src/lib/server_common/tests/socket_requestor_test.cc
index ac1731f..9085ba9 100644
--- a/src/lib/server_common/tests/socket_requestor_test.cc
+++ b/src/lib/server_common/tests/socket_requestor_test.cc
@@ -76,7 +76,7 @@ TEST(SocketRequestorAccess, initialized) {
initTestSocketRequestor(NULL);
}
-// This class contains a fake (module)ccsession to emulate answers from Boss
+// This class contains a fake (module)ccsession to emulate answers from Init
class SocketRequestorTest : public ::testing::Test {
public:
SocketRequestorTest() : session(ElementPtr(new ListElement),
@@ -100,7 +100,7 @@ public:
}
// Creates a valid socket request answer, as it would be sent by
- // Boss. 'valid' in terms of format, not values
+ // Init. 'valid' in terms of format, not values
void
addAnswer(const std::string& token, const std::string& path) {
ElementPtr answer_part = Element::createMap();
@@ -141,7 +141,7 @@ createExpectedRequest(const std::string& address,
// create the envelope
const ElementPtr packet = Element::createList();
- packet->add(Element::create("Boss"));
+ packet->add(Element::create("Init"));
packet->add(Element::create("*"));
packet->add(createCommand("get_socket", command_args));
packet->add(Element::create(-1));
@@ -282,7 +282,7 @@ createExpectedRelease(const std::string& token) {
// create the envelope
const ElementPtr packet = Element::createList();
- packet->add(Element::create("Boss"));
+ packet->add(Element::create("Init"));
packet->add(Element::create("*"));
packet->add(createCommand("drop_socket", command_args));
packet->add(Element::create(-1));
diff --git a/src/lib/util/unittests/fork.cc b/src/lib/util/unittests/fork.cc
index 3414a3c..7ed22f8 100644
--- a/src/lib/util/unittests/fork.cc
+++ b/src/lib/util/unittests/fork.cc
@@ -93,10 +93,10 @@ provide_input(int *read_pipe, const void *input, const size_t length)
/*
* This creates a pipe, forks and reads the pipe and compares it
- * with given data. Used to check output of run in asynchronous way.
+ * with given data. Used to check output of run in an asynchronous way.
*/
pid_t
-check_output(int *write_pipe, const void *output, const size_t length)
+check_output(int *write_pipe, const void* const output, const size_t length)
{
int pipes[2];
if (pipe(pipes)) {
@@ -109,9 +109,7 @@ check_output(int *write_pipe, const void *output, const size_t length)
return pid;
} else {
close(pipes[1]);
- // We don't return the memory, but we're in tests and end this process
- // right away.
- unsigned char *buffer = new unsigned char[length + 1];
+ unsigned char* buffer = new unsigned char[length + 1];
// Try to read one byte more to see if the output ends here
size_t got_length(read_data(pipes[0], buffer, length + 1));
bool ok(true);
@@ -133,8 +131,10 @@ check_output(int *write_pipe, const void *output, const size_t length)
fprintf(stderr, "%02hhx", output_c[i]);
}
fprintf(stderr, "\n");
+ delete [] buffer;
exit(1);
} else {
+ delete [] buffer;
exit(0);
}
}
diff --git a/src/lib/util/unittests/fork.h b/src/lib/util/unittests/fork.h
index d5623a7..6b9e749 100644
--- a/src/lib/util/unittests/fork.h
+++ b/src/lib/util/unittests/fork.h
@@ -40,10 +40,10 @@ bool
process_ok(pid_t process);
pid_t
-provide_input(int *read_pipe, const void *input, const size_t length);
+provide_input(int* read_pipe, const void* input, const size_t length);
pid_t
-check_output(int *write_pipe, const void *output, const size_t length);
+check_output(int* write_pipe, const void* const output, const size_t length);
} // End of the namespace
}
diff --git a/tests/lettuce/configurations/auth/auth_badzone.config.orig b/tests/lettuce/configurations/auth/auth_badzone.config.orig
index ab11bc9..f86882a 100644
--- a/tests/lettuce/configurations/auth/auth_badzone.config.orig
+++ b/tests/lettuce/configurations/auth/auth_badzone.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [{
"severity": "DEBUG",
@@ -29,7 +29,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/auth/auth_basic.config.orig b/tests/lettuce/configurations/auth/auth_basic.config.orig
index 4067fb1..24f615c 100644
--- a/tests/lettuce/configurations/auth/auth_basic.config.orig
+++ b/tests/lettuce/configurations/auth/auth_basic.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -13,7 +13,7 @@
"address": "127.0.0.1"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/bindctl/bindctl.config.orig b/tests/lettuce/configurations/bindctl/bindctl.config.orig
index 3530b3e..ef0e8e2 100644
--- a/tests/lettuce/configurations/bindctl/bindctl.config.orig
+++ b/tests/lettuce/configurations/bindctl/bindctl.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -17,7 +17,7 @@
"data_sources": {
"classes": {}
},
- "Boss": {
+ "Init": {
"components": {
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
}
diff --git a/tests/lettuce/configurations/bindctl_commands.config.orig b/tests/lettuce/configurations/bindctl_commands.config.orig
index b60201d..980262b 100644
--- a/tests/lettuce/configurations/bindctl_commands.config.orig
+++ b/tests/lettuce/configurations/bindctl_commands.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -23,7 +23,7 @@
"address": "127.0.0.1"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "dispensable", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/ddns/ddns.config.orig b/tests/lettuce/configurations/ddns/ddns.config.orig
index 93e7c1c..02978be 100644
--- a/tests/lettuce/configurations/ddns/ddns.config.orig
+++ b/tests/lettuce/configurations/ddns/ddns.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -39,7 +39,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-xfrout": {
"kind": "dispensable",
diff --git a/tests/lettuce/configurations/ddns/noddns.config.orig b/tests/lettuce/configurations/ddns/noddns.config.orig
index 7a9a947..d075924 100644
--- a/tests/lettuce/configurations/ddns/noddns.config.orig
+++ b/tests/lettuce/configurations/ddns/noddns.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -35,7 +35,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-xfrout": {"kind": "dispensable"},
"b10-auth": {"kind": "needed", "special": "auth"},
diff --git a/tests/lettuce/configurations/default.config b/tests/lettuce/configurations/default.config
index 9e1d3d1..2713def 100644
--- a/tests/lettuce/configurations/default.config
+++ b/tests/lettuce/configurations/default.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
index c5545ed..7da6304 100644
--- a/tests/lettuce/configurations/example.org.config.orig
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -26,7 +26,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/example.org.inmem.config b/tests/lettuce/configurations/example.org.inmem.config
index 7ea34b3..7ec921d 100644
--- a/tests/lettuce/configurations/example.org.inmem.config
+++ b/tests/lettuce/configurations/example.org.inmem.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [{
"severity": "DEBUG",
@@ -26,7 +26,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
index eeb9733..3bb3330 100644
--- a/tests/lettuce/configurations/example2.org.config
+++ b/tests/lettuce/configurations/example2.org.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"severity": "DEBUG",
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
index 107c53f..d93a8c6 100644
--- a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
+++ b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -29,7 +29,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/ixfr-out/testset1-config.db b/tests/lettuce/configurations/ixfr-out/testset1-config.db
index e78c84a..d5eaf83 100644
--- a/tests/lettuce/configurations/ixfr-out/testset1-config.db
+++ b/tests/lettuce/configurations/ixfr-out/testset1-config.db
@@ -9,7 +9,7 @@
}
]
},
- "version": 2,
+ "version": 3,
"Logging": {
"loggers":
[
@@ -51,7 +51,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
index fe482f9..96e25d8 100644
--- a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
+++ b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -24,7 +24,7 @@
}]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth-2": {"kind": "dispensable", "special": "auth"},
"b10-auth": {"kind": "dispensable", "special": "auth"},
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
index bc4ff5f..9e6c168 100644
--- a/tests/lettuce/configurations/no_db_file.config
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"severity": "DEBUG",
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/nsec3/nsec3_auth.config b/tests/lettuce/configurations/nsec3/nsec3_auth.config
index 618c5ef..5dfffa1 100644
--- a/tests/lettuce/configurations/nsec3/nsec3_auth.config
+++ b/tests/lettuce/configurations/nsec3/nsec3_auth.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth"},
"b10-cmdctl": {"kind": "needed", "special": "cmdctl"}
diff --git a/tests/lettuce/configurations/resolver/resolver_basic.config.orig b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
index 0adca9f..fe5ddd0 100644
--- a/tests/lettuce/configurations/resolver/resolver_basic.config.orig
+++ b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
@@ -1 +1,31 @@
-{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Resolver": {"query_acl": [{"action": "REJECT", "from": "127.0.0.1"}], "listen_on": [{"port": 47806, "address": "127.0.0.1"}]}, "Boss": {"components": {"b10-resolver": {"kind": "needed"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
+{
+ "version": 3,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "*",
+ "debuglevel": 99
+ } ]
+ },
+ "Resolver": {
+ "query_acl": [ {
+ "action": "REJECT",
+ "from": "127.0.0.1"
+ } ],
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "Init": {
+ "components": {
+ "b10-resolver": {
+ "kind": "needed"
+ },
+ "b10-cmdctl": {
+ "kind": "needed",
+ "special": "cmdctl"
+ }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/xfrin/inmem_slave.conf b/tests/lettuce/configurations/xfrin/inmem_slave.conf
index cc1c997..fedf372 100644
--- a/tests/lettuce/configurations/xfrin/inmem_slave.conf
+++ b/tests/lettuce/configurations/xfrin/inmem_slave.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -30,7 +30,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
index c04d917..1b2953d 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
+++ b/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -36,7 +36,7 @@
"Stats": {
"poll-interval": 1
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig
index 80cc3db..bccadf7 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig
+++ b/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -36,7 +36,7 @@
"Stats": {
"poll-interval": 1
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
index cef04cf..2e6b17f 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -24,7 +24,7 @@
}]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf b/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
index d977c58..a5c22b1 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -37,7 +37,7 @@
"class": "IN"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/data/commands/bad_command b/tests/lettuce/data/commands/bad_command
index 95d1694..2daa7cb 100644
--- a/tests/lettuce/data/commands/bad_command
+++ b/tests/lettuce/data/commands/bad_command
@@ -1,8 +1,8 @@
!echo shouldshow
# just add something so the test can verify it's reverted
-config add /Boss/components b10-auth
-config set /Boss/components/b10-auth/kind needed
-config set /Boss/components/b10-auth/special auth
+config add /Init/components b10-auth
+config set /Init/components/b10-auth/kind needed
+config set /Init/components/b10-auth/special auth
bad command
# this should not be reached
!echo shouldnotshow
diff --git a/tests/lettuce/features/bindctl_commands.feature b/tests/lettuce/features/bindctl_commands.feature
index 57406c3..b9fef82 100644
--- a/tests/lettuce/features/bindctl_commands.feature
+++ b/tests/lettuce/features/bindctl_commands.feature
@@ -7,7 +7,7 @@ Feature: control with bindctl
# a number of modules. It then removes all non-essential modules,
# and checks whether they do disappear from the list of running
# modules (note that it 'misuses' the help command for this,
- # there is a Boss command 'show_processes' but it's output is
+ # there is an Init command 'show_processes' but its output is
# currently less standardized than 'help')
Given I have bind10 running with configuration bindctl_commands.config
And wait for bind10 stderr message BIND10_STARTED_CC
@@ -19,7 +19,7 @@ Feature: control with bindctl
And wait for bind10 stderr message STATS_STARTING
And wait for bind10 stderr message STATSHTTPD_STARTED
- Then remove bind10 configuration Boss/components/NOSUCHMODULE
+ Then remove bind10 configuration Init/components/NOSUCHMODULE
last bindctl output should contain Error
bind10 module Xfrout should be running
@@ -30,29 +30,29 @@ Feature: control with bindctl
bind10 module StatsHttpd should be running
bind10 module Resolver should not be running
- Then remove bind10 configuration Boss/components value b10-xfrout
+ Then remove bind10 configuration Init/components value b10-xfrout
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
# assuming it won't error for further modules (if it does, the final
# 'should not be running' tests would fail anyway)
- Then remove bind10 configuration Boss/components value b10-stats-httpd
+ Then remove bind10 configuration Init/components value b10-stats-httpd
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-stats
+ Then remove bind10 configuration Init/components value b10-stats
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-zonemgr
+ Then remove bind10 configuration Init/components value b10-zonemgr
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-xfrin
+ Then remove bind10 configuration Init/components value b10-xfrin
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-auth
+ Then remove bind10 configuration Init/components value b10-auth
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
@@ -103,7 +103,7 @@ Feature: control with bindctl
last bindctl output should not contain shouldnotshow
# This would fail if the entire list was passed, or the configuration
# was committed
- send bind10 the command config show Boss/components
+ send bind10 the command config show Init/components
last bindctl output should not contain b10-auth
# nested_command contains another execute script
@@ -124,8 +124,8 @@ Feature: control with bindctl
When I send bind10 the command execute init_authoritative_server show
# just test some parts of the output
- last bindctl output should contain /Boss/components/b10-auth/special
- last bindctl output should contain /Boss/components/b10-zonemgr/kind
+ last bindctl output should contain /Init/components/b10-auth/special
+ last bindctl output should contain /Init/components/b10-zonemgr/kind
last bindctl output should contain Please
# nothing should have been changed
diff --git a/tests/lettuce/features/ddns_system.feature b/tests/lettuce/features/ddns_system.feature
index 8e279a7..184c8ae 100644
--- a/tests/lettuce/features/ddns_system.feature
+++ b/tests/lettuce/features/ddns_system.feature
@@ -48,7 +48,7 @@ Feature: DDNS System
And wait for new bind10 stderr message DDNS_STOPPED
# Test 7
- # BoB should restart it
+ # Init should restart it
And wait for new bind10 stderr message DDNS_STARTED
# Test 8
@@ -65,7 +65,7 @@ Feature: DDNS System
# Test 9
When I send bind10 the command Auth shutdown
And wait for new bind10 stderr message AUTH_SHUTDOWN
- # BoB should restart it automatically
+ # Init should restart it automatically
And wait for new bind10 stderr message AUTH_SERVER_STARTED
# Test 10
diff --git a/tests/lettuce/features/default.feature b/tests/lettuce/features/default.feature
index ce7ee1e..bd81f12 100644
--- a/tests/lettuce/features/default.feature
+++ b/tests/lettuce/features/default.feature
@@ -8,7 +8,7 @@ Feature: default bind10 config
And wait for bind10 stderr message STATS_STARTING
# These should be running
- bind10 module Boss should be running
+ bind10 module Init should be running
And bind10 module Logging should be running
And bind10 module Stats should be running
diff --git a/tests/lettuce/features/multi_instance.feature b/tests/lettuce/features/multi_instance.feature
index 4ce135a..3ab06eb 100644
--- a/tests/lettuce/features/multi_instance.feature
+++ b/tests/lettuce/features/multi_instance.feature
@@ -34,7 +34,7 @@ Feature: Multiple instances
If I remember the pid of process b10-auth
And remember the pid of process b10-auth-2
- When I remove bind10 configuration Boss/components value b10-auth-2
+ When I remove bind10 configuration Init/components value b10-auth-2
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth should not have changed
@@ -42,9 +42,9 @@ Feature: Multiple instances
When I send bind10 the following commands
"""
- config add Boss/components b10-auth-2
- config set Boss/components/b10-auth-2/special auth
- config set Boss/components/b10-auth-2/kind needed
+ config add Init/components b10-auth-2
+ config set Init/components/b10-auth-2/special auth
+ config set Init/components/b10-auth-2/kind needed
config commit
"""
And wait for new bind10 stderr message AUTH_SERVER_STARTED
@@ -53,7 +53,7 @@ Feature: Multiple instances
Then the pid of process b10-auth should not have changed
A query for example.com should have rcode REFUSED
- When I remove bind10 configuration Boss/components value b10-auth
+ When I remove bind10 configuration Init/components value b10-auth
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth-2 should not have changed
A query for example.com should have rcode REFUSED
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index 142a78e..7cc7d3e 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -202,28 +202,28 @@ def parse_bindctl_output_as_data_structure():
"parseable data structure: '" + output + "': " + str(ve)
def find_process_pid(step, process_name):
- """Helper function to request the running processes from Boss, and
+ """Helper function to request the running processes from Init, and
return the pid of the process with the given process_name.
- Fails with an assert if the response from boss is not valid JSON,
+ Fails with an assert if the response from b10-init is not valid JSON,
or if the process with the given name is not found.
"""
# show_processes output is a list of lists, where the inner lists
# are of the form [ pid, "name" ]
# Not checking data form; errors will show anyway (if these turn
# out to be too vague, we can change this)
- step.given('send bind10 the command Boss show_processes')
+ step.given('send bind10 the command Init show_processes')
running_processes = parse_bindctl_output_as_data_structure()
for process in running_processes:
if process[1] == process_name:
return process[0]
assert False, "Process named " + process_name +\
- " not found in output of Boss show_processes";
+ " not found in output of Init show_processes";
@step("remember the pid of process ([\S]+)")
def remember_pid(step, process_name):
"""Stores the PID of the process with the given name as returned by
- Boss show_processes command.
+ Init show_processes command.
Fails if the process with the given name does not appear to exist.
Stores the component_name->pid value in the dict world.process_pids.
This should only be used by the related step
@@ -239,7 +239,7 @@ def remember_pid(step, process_name):
@step('pid of process ([\S]+) should not have changed')
def check_pid(step, process_name):
"""Checks the PID of the process with the given name as returned by
- Boss show_processes command.
+ Init show_processes command.
Fails if the process with the given name does not appear to exist.
Fails if the process with the given name exists, but has a different
pid than it had when the step 'remember the pid of process' was
@@ -343,9 +343,9 @@ def configure_ddns_on(step):
step.behave_as("""
When I send bind10 the following commands
\"\"\"
- config add Boss/components b10-ddns
- config set Boss/components/b10-ddns/kind dispensable
- config set Boss/components/b10-ddns/address DDNS
+ config add Init/components b10-ddns
+ config set Init/components/b10-ddns/kind dispensable
+ config set Init/components/b10-ddns/address DDNS
config commit
\"\"\"
""")
@@ -358,7 +358,7 @@ def configure_ddns_off(step):
step.behave_as("""
When I send bind10 the following commands
\"\"\"
- config remove Boss/components b10-ddns
+ config remove Init/components b10-ddns
config commit
\"\"\"
""")
diff --git a/tests/system/Makefile.am b/tests/system/Makefile.am
index 663258b..aed1d79 100644
--- a/tests/system/Makefile.am
+++ b/tests/system/Makefile.am
@@ -6,7 +6,7 @@ distclean-local:
# Most of the files under this directory (including test subdirectories)
# must be listed in EXTRA_DIST.
-EXTRA_DIST = README cleanall.sh ifconfig.sh start.pl stop.pl run.sh runall.sh
+EXTRA_DIST = README cleanall.sh ifconfig.sh start.pl stop.pl runall.sh
EXTRA_DIST += common/default_user.csv
EXTRA_DIST += glue/auth.good glue/example.good glue/noglue.good glue/test.good
EXTRA_DIST += glue/tests.sh glue/clean.sh
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 75c91de..ca58240 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -38,8 +38,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Starting b10-auth and checking that it works ($n)"
-echo 'config add Boss/components b10-auth
-config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
+echo 'config add Init/components b10-auth
+config set Init/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -68,7 +68,7 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config remove Boss/components b10-auth
+echo 'config remove Init/components b10-auth
config commit
quit
' | $RUN_BINDCTL \
@@ -79,8 +79,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config add Boss/components b10-auth
-config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
+echo 'config add Init/components b10-auth
+config set Init/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -143,8 +143,8 @@ n=`expr $n + 1`
echo "I:Starting more b10-auths and checking that ($n)"
for i in 2 3
do
- echo 'config add Boss/components b10-auth-'$i'
-config set Boss/components/b10-auth-'$i' { "special": "auth", "kind": "needed" }
+ echo 'config add Init/components b10-auth-'$i'
+config set Init/components/b10-auth-'$i' { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -180,7 +180,7 @@ n=`expr $n + 1`
echo "I:Stopping extra b10-auths and checking that ($n)"
for i in 3 2
do
- echo 'config remove Boss/components b10-auth-'$i'
+ echo 'config remove Init/components b10-auth-'$i'
config commit
quit
' | $RUN_BINDCTL \
diff --git a/tests/system/glue/nsx1/b10-config.db.in b/tests/system/glue/nsx1/b10-config.db.in
index 5f93f3b..6802c53 100644
--- a/tests/system/glue/nsx1/b10-config.db.in
+++ b/tests/system/glue/nsx1/b10-config.db.in
@@ -23,7 +23,7 @@
}
]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/system/ixfr/b10-config.db.in b/tests/system/ixfr/b10-config.db.in
index b3b27a4..a36117d 100644
--- a/tests/system/ixfr/b10-config.db.in
+++ b/tests/system/ixfr/b10-config.db.in
@@ -38,7 +38,7 @@
"class": "IN"
}]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tools/query_cmp/src/lib/handledns.py b/tools/query_cmp/src/lib/handledns.py
index e33ce9e..e906bae 100755
--- a/tools/query_cmp/src/lib/handledns.py
+++ b/tools/query_cmp/src/lib/handledns.py
@@ -187,7 +187,7 @@ def send_req(query, server, port=53, timeout=5):
msg = Message(Message.RENDER)
msg.set_qid(int(qheader['id']))
- msg.set_opcode(Opcode.QUERY())
+ msg.set_opcode(Opcode.QUERY)
msg.set_rcode(Rcode(int(qheader['rcode'])))
if qheader['qr'] == 1:
More information about the bind10-changes
mailing list