BIND 10 trac2440, updated. 896dd15f4ac12826fa7e7dea909e310c9d1518b9 [2440] noted some differences from BIND 9
BIND 10 source code commits
bind10-changes at lists.isc.org
Sat Feb 16 05:01:17 UTC 2013
The branch, trac2440 has been updated
via 896dd15f4ac12826fa7e7dea909e310c9d1518b9 (commit)
via 759ad9e32d88e86db2e1ff749cb258fcf48b7e34 (commit)
via 231effe63004f65df8997eb6505c6df9259a30c5 (commit)
via e0bc8b2ac591202750d65388d825fccff620adab (commit)
via 769575c46f377163f257143f6328661d653c1281 (commit)
via 11da73b20b66ce4f9b848ace8554099d87116b5c (commit)
via 75920555b24b381c9931e699350792858fd1fbdc (commit)
via 017315cee2a710d8cdcfabc4deae5108e5f83c49 (commit)
via bc9b81968e1cda2586e0e82c2dfeb7188ae299f2 (commit)
via 4ef474eb389dc4bd7228d5c13a55db4a88de6509 (commit)
via e99268ff3a5b190b99168f92a081f5994d3b092b (commit)
via c8612ad245784bec63d3aff107c3765a5e922892 (commit)
via 42a5732b72152d692394dc1aefaefd6165e99771 (commit)
via 6580e209688b7f9c667d82b15c12425fd3d875c6 (commit)
via 36ba2daf7d81708e59b2d01202dbe6be03981244 (commit)
via 016d4a4bcb6c23de3107df178a71f75e46dbdba0 (commit)
via aa4361017c9850636abc61b9248a49cfdf253370 (commit)
via 67adaa92a4419673d07fa76f2add0430d423624d (commit)
via 12071a88b91b30fada5848868fc3ecd173785b37 (commit)
via e090b84bd5e2d79d7730563beda5e95140c5d03d (commit)
via e6f86f2f5eec8e6003c13d36804a767a840d96d6 (commit)
via 3f272be09aadc78d27333ddeba66ba7e9de25f1f (commit)
via 52f22bb0132d78d07145fec0f1fde03069f7e5fa (commit)
via 3e367ff100f7d1b76ec8743385917ba747b9ad34 (commit)
via 72106475f625075e3e49bdba04e6f9f26b105294 (commit)
via 5aa5b4e403893b1de767cea00b4a3f9d9a17422e (commit)
via dd2dbeb5a4507ba50ce1547972ab4ea1b56b4655 (commit)
via daf2abe68ce9c111334a15c14e440730f3a085e2 (commit)
via 1eef52e751a97720ba679256b24b2c90ed98b5fd (commit)
via 7782afcefe47162534a377769d9eda2c0fa960ff (commit)
via 660a0d164feaf055677f375977f7ed327ead893e (commit)
via 50e61198ea74d395b0bc9015c9f01ff29e357e48 (commit)
via aa5b7e1214e00019776d7bab0cf49df604d2a7e9 (commit)
via 9b16b116c909bedfc1353147f3948ba19f42fb5a (commit)
via 1d0c2004865d1bf322bf78d13630d992e39179fd (commit)
via dd447189c8e307c305415a34894c48972cea5a2c (commit)
via b4de3233542a0c98c04c0cf730bd8222efe897b7 (commit)
via 1a80b1dd71902c7942e11316b53c6cba4a16565d (commit)
via 59c744cf4838c919abe8763501208e02aff9526d (commit)
via b5e2be95d21ed750ad7cf5e15de2058aa8bc45f4 (commit)
via 1d6a2e3fb2715b445ce835847d7d353886495fea (commit)
via a12aed4bde955f0edb68717ee23895bbc78baccf (commit)
via ac75b8db7bb9c0b104672d0987d98ec8055c698c (commit)
via 3fa52fbaed9589ecad689ccf105bbf7365d26d62 (commit)
via 55f8f410621028a556a3b0af8d2d41bc0f60b08e (commit)
via 4d6818eb58a726d0abd92a171a0f18334a9eb3b7 (commit)
via 530f569e47b06f49402611bda07e1956cdf04a24 (commit)
via da67c0642c9403f08e278e2424bc7bfde74e034a (commit)
via 6f83737a9b9deaacd5ce0799cbda9e18fdb81c4b (commit)
via 4c439a4cca7768510b4549c73e0f43120d4c9739 (commit)
via fbf11f41c327130fbdb39fcf64daa16f278eb197 (commit)
via 733d42fa3d0b0b0f426a4817dcd022c764158d0d (commit)
via e5005185351cf73d4a611407c2cfcd163f80e428 (commit)
via bfefbfda28cb512b12643555790149b7c64414f3 (commit)
via 4d074c3e7048f8dde151e078bee4967949d3b32e (commit)
via f7a26a8f9ee4adf64d754e6c2a6c07977854c40c (commit)
via 89fbc1a1f41da33150176d8d0ba83ae8e88a03da (commit)
via 3b03a16056601b26d27db3a4cd0baced7e4ba756 (commit)
via 564f4b8990e4759f57033f4fc9de2359e3baf829 (commit)
via 005cba1c8d62e2f44ad05b512ac9b7be639da725 (commit)
via d85b2e22c5c45050d3191ee73c508bbd3cd1251f (commit)
via 64fb39c963cd9e6494f71dfe14d9dabdf869fdc8 (commit)
via 3a185f59245200be3c6b2e86340ac1c2ae464efb (commit)
via b43c93c8cb4e0256677c01d5f649093fc27c998a (commit)
via ffd4a283b18f8eaa453712e1e615e2440b12aa0d (commit)
via 0749f9e194505698031990eb7c544e8ec076fe10 (commit)
via 39c1d353784c56ac2f1c42836348393b7d80303e (commit)
via 2a1d32f1610c1b99a2b6bcfdf350fcf123c51e19 (commit)
via d76159997442d71928d459041d46d89a01fbdefc (commit)
via ebeb7923963456d7f62721327290b75572ab4279 (commit)
via aa4dcd59d930af330b7f082c40a395d0bc424d97 (commit)
via 8df1853f98c6fdfbdc186062426904d047259d53 (commit)
via 1ec905f87eaebf01ae956b1ec5ac05afbfae9836 (commit)
via 936279ca980c2c2b5b711f147703665f722ffd9c (commit)
via 6dae0d1f016190f74098fc92cc5200bdc9ea165f (commit)
via 37743e4ec13cbb7e4b2864ce54e3d9732a1e8101 (commit)
via 8b2f7c325534431cb4f6cca82c5d314583e03248 (commit)
via 06fa3ca48f34c5bb6c235a1fbda2cf00be69c2c8 (commit)
via 2baf0ceb67f23351cd20ac77978b29366f7c5638 (commit)
via 6ded04bbd0fb963754e4123cee99cb07de07a618 (commit)
via bae3798603affdb276f370c1ac6b33b011a5ed4f (commit)
via 6dc113cc0e20a4781ad0f991871966d244371440 (commit)
via 28c7d972110b86833c31631d563b07a2824efbab (commit)
via 3c200a3decdd5bbc30bbdb9c81f9f206eda98ad9 (commit)
via 3a8e933c07b9493fd5deb97d86433532c68cce8e (commit)
via 87da92a958f4eb914c981d0b3ddab3cf68d41803 (commit)
via f7a77b8a0b46d3a01fc3a31a303e029da1c5f6b3 (commit)
via ee5c66336b0baf8424d5be04670dd5b96344e07e (commit)
via 6eb06132ab8aa9e1e4c6b73a233952b4bf23baff (commit)
via 6b09c6be32b221b73a05f98de7814c650597883a (commit)
via d1ae5b15754d5d359c52fd6f58e23e0e0907d4f3 (commit)
via 3188ee1246e7e1b52b334cfa6edb04d2123ea759 (commit)
via 076dad2aada1624b0b632e4eee3e6a1907c51a16 (commit)
via cb95ae3f178ce497a8dce0aad4173a8586dc9ca2 (commit)
via f07dbb59715b26afeef7ff682bba887187ad169b (commit)
via b1f09a967614c9668bedc3d877bb12f1622222f1 (commit)
via db90d30b97219e365c3643fd6c79878eaf1ac518 (commit)
via 9a7c7bf6690bf3a6d85b5f3dc802d819825b3955 (commit)
via 8b84c4beb47bf82e9c8302c39b2144a4717f7d1b (commit)
via 2dffde2cef71ec693097f59f81fe40ada8035975 (commit)
via 19a4de9fd0738a4332169f432035fd4734311f53 (commit)
via 8404133f0a362cede4819322da6506b83968e79b (commit)
via 10d63236169056c2c14cf4807d05017599c0a3bd (commit)
via f95627b501f5bf720e74de1d2cdb17219c826f02 (commit)
via 2eeab2ebf0b7e9de2a7c73e553572dbc313d06a7 (commit)
via 8eb3dbe543fa39f7702a48422777df9752dd8afb (commit)
via 74110729288f3014cfde862c943bb68bc1fceb69 (commit)
via 4c56f4c81c03d04a1d2b8be3250ef1bea9979ef4 (commit)
via ddfbc7fa1ca5fc712e16801f5f24807f90d8e88b (commit)
via fbb78097f3aca9bd8fbf4cce1f1c7169719d04e5 (commit)
via a85aff6a94f4f8e08bf3beaaf4da6e282b28e253 (commit)
via 476b3eff5488d89b03d7ac34830ad52973e9b0bd (commit)
via 94df9fad7ddbe91fff09b601cfaf07fcfd346659 (commit)
via 5a0d055137287f81e23fbeedd35236fee274596d (commit)
via a01569277cda3f78b1171bbf79f15ecf502e81e2 (commit)
via 6aa1c3cc7dbb55d32304ada9b88037346ba929b0 (commit)
via bd93846c0d74b780b2e6a6547686c9bc2db6fe23 (commit)
via 971ac6698f44e468f072fab7baaea5eb6f6b77a3 (commit)
via 7d086e721bf8e5b081aeed8a95d9cf517f3cf7d8 (commit)
via acd8358522b4ac20a75684b6ec616269bcc705f2 (commit)
via 23610f5bf4d613f503793bf7c8526c67f95df223 (commit)
via 1c50c5a6ee7e9675e3ab154f2c7f975ef519fca2 (commit)
via b6b57fb469c8a0483c1050ec64dc46b6dfb1b40e (commit)
via 709b6c520e3e86c880655ded5cbe375d29f80aa9 (commit)
via 677e990f61d47065da92899bd3c82115cd977c8c (commit)
via 878c361337c79e35816bb7df20595b8a5faa3491 (commit)
via a08b7575e7d42740c3f659087420472753bbee16 (commit)
via 0a6fc5a8a77ac5f0e579dbb57da5ea5eb1923c39 (commit)
via c274bccc9c7ab201ac00f09896b2511791556278 (commit)
via 95b8519b040545420529f7dee5944826f6cc1224 (commit)
via 826ac1b1e637a6aa8b5763c4b810755ac0551446 (commit)
via cfa26ab8df9b533d32b8dd5aa405bf237bb58f12 (commit)
via 20f13469624c75aa090ad74f5ce2518adf826f01 (commit)
via 266bfcd67adafe326c13c6d835f5875628972d9f (commit)
via 666395baa9b73aca0312787b683a7d3a96e7ca86 (commit)
via 8431fb8b25cde01d16bfdefdc52b2eb2b07cb756 (commit)
via ac23914ba629242fa651b4d68b61e9e5380c0643 (commit)
via 2a1e4152da4fb42c78e57ada20de7e6a4d64da51 (commit)
via 4df79169011293abac9c08a1cc47e4f16f5e1750 (commit)
via 73ca6b06d15e426c0d57c41a0e126d7433acc598 (commit)
via 271e37c6b0befddbf9291577460868f0c67ea428 (commit)
via f59c311cfd135589af52c6f8a9afa45c03f84318 (commit)
via 814eb8ad33d8f85621d3daacd4c64c4d7e3dc43d (commit)
via 11f5ae7cddca20ae003331cd69b817be4aea790c (commit)
via fe0db0adf0796432ace4794c157efc2bfb79e008 (commit)
via 269cd1867284c4ebf44ebabacadc99c6413bcacb (commit)
via ed8243603f26e46d8ae48f0fb4bac7545c5ff4c9 (commit)
via 583de5551d162a5feb89bf5759e573b32d80f142 (commit)
via 53dec20f027b589c2202f182a01d8e33ef15c2a9 (commit)
via 937c982cf30ed26570ca8853c45f8afe1a32e3e7 (commit)
via bc42c4b3d31ed960581808d76e7150e0fc3eb1d9 (commit)
via e6348b6115a2525de18a2d70c4b15fece46a9c90 (commit)
via 794f240b5c6d6da7907f6efab5824658edf26fa6 (commit)
via 5351989c3736c027df8f164ee257feb6d51f9b72 (commit)
via c1b8e63ca9cf15d022805691970e3ecc0dd0e096 (commit)
via 10833250f2751b41b6d9e9b86be5396ec3b4e062 (commit)
via 691f87a3076fba4d5ce60a0960c1c07a35055e05 (commit)
via 57f3e532a4cb9ef57484b638cfb53296dc403e95 (commit)
via 31707f9fd9a78add4895394d0168d5664184b18f (commit)
via 5cfbdc70c17f139c6c0d7d74afba76240ecd0474 (commit)
via 76f20bfafceeca8bceefda457fe64904d4d21c59 (commit)
via 44699b4b18162581cd1dd39be5fb76ca536012e6 (commit)
via 8ed7d3706940a9a822503426fe4c09f3bf995610 (commit)
via d8c50d0baef0ad3a1339b1c03bd0d37a32719d86 (commit)
via 7e110b499a751ca43a473a94eb4448bdbe78a32a (commit)
via 2db190194445d191e1d1eb75f0fc1936d0a791df (commit)
via 17a9c25450a3129fa5787a5deb70d371a602f311 (commit)
via c5efaec3f9196e4b4cabccde691f30ad9de380ba (commit)
via ae25fcc7abecb79eb3d4b9df1e0b463c27f77258 (commit)
via 58ccf7aa110fe656c52060cbc7b2cc26b4a9ae9d (commit)
via 0aafc9b4325734e8d8d16805ad459f8dde153fdb (commit)
via af2672cf1515f459dfc5db30919c407abcb8273b (commit)
via e4cd5386850b79a824320a63bfed69f03358574f (commit)
via 7ddf561d6304d62ce755344b0e28ab9b025b9f27 (commit)
via e6603386c50335787bca7443a9716414cf68c7bc (commit)
via 5d1563f963952af222030a404b21a0bc8171e2ac (commit)
via 859b69891fe1fc3be67bbd293397fbe7b989eb26 (commit)
via 3ca5de640a863aa8b84e5d34b241a26a63992e11 (commit)
via ff930eca8ab894a76eb0a695ebde98a7bf117d45 (commit)
via 3d35bcb14cbe4eba0cf2a2605bad4ede5ae6c338 (commit)
via e8c5250c5b3924c34159c92b2e3a23b8878572e6 (commit)
via b8e8e7ed5ff301eb18db71ff83c46479984f5fde (commit)
via 9c42816e5f405de02ef30728d82fcb045cf6a803 (commit)
via f18d297fd5b1651ebf61d5d72ca0d64b92fece30 (commit)
via a5a7f330f48cf6a63e7f67d690ea3487c11c2cce (commit)
via e1ed220b645ea62b01d9a467f8724be4fd59a696 (commit)
via adb9e821ff8fa75c3ecc1ecc853ff3e90d1edfd7 (commit)
via 463700b34d94d933cc4244480e46f975c798d777 (commit)
via f575d8fc0316d69b2ea54788b43948488dface2e (commit)
via 0479cb528e77e29f67b3a05ed79a83c2ef483455 (commit)
via d985d0dde44a6c4b090cf211c295ab10a65924e1 (commit)
via 4f0716782cba2f94c577e80cc23e4759b2440907 (commit)
via 56e9d0e542d1396e9d0f373bb8cc1fd5d0a945fe (commit)
via 3f74193c572fefc2e718ca0952962494a9680ddb (commit)
via ba7573eb62566aece58b514dfcb3b5a322adfed1 (commit)
via 717d619224d93b6dc6da0cf6267deffc31e130ad (commit)
via 10fece1ac6ec02b033f00689fc31ac4f93570dda (commit)
via 9a7784d748031b97dbb5a55fed84dcca254096d5 (commit)
via 2ca96761f12b10e51faa6e394617baffbb68afdd (commit)
via b122fa56ec0caf72e351f973c9ef337b2b3122c1 (commit)
via ed2adc060ea123c5a6fec274950fdc68cde48059 (commit)
via 4c4e2711beeb2eed8104055cf66dd4e8151c4fd1 (commit)
via 93716b025a4755a8a2cbf250a9e4187741dbc9bb (commit)
via 21a9a30e94cfa668daef86935894d6b5257179f0 (commit)
via f7c0e1bf2a90711cd6df426b17465eedafaade00 (commit)
via 4dd6c4cc1c15974d1009c489f2e26e987479ea77 (commit)
via f94444fe1c05288c12f6255bb3552a8c8166745a (commit)
via d94641e52198629abdd3ffe82751b0b6ce11ff28 (commit)
via d8c2ea58e026b76c516f7ff6e3d6199af1b5ecc7 (commit)
via d4032a6703f672811b92811c10998d8eb718eec6 (commit)
via 83b06fb184c17f70a28ab307347e9eb075ee048d (commit)
via 6873aeabcd152ef491fa66b1292192e1c16e1215 (commit)
via ddde1f3034cb004bde11a8c1ba41944a2ce58fff (commit)
via 12807b1d6e3a0846198bcc062978c077ac101b91 (commit)
via b22bc32b2776aa8904ee4fae2e9f6b075eace0f4 (commit)
via df5cb12f60fc4f22b24218a54bf428f403cabe22 (commit)
via 72383e89593fdc91a83b0ffe875a4fdb5114fbfc (commit)
via 79b78d6538b85a97e1a84aec63928a6bd0251218 (commit)
via 2352105cc77dd16a583c8f4fdcaf9d680e450344 (commit)
via 3d4f3cd903294e03bafbf37963051fdcf2233cae (commit)
via ea780724240cfb2e065d3cfac7a32ba16e931dc3 (commit)
via 443be4318110331b2761b8d945dab4f05d765a1a (commit)
via 8a33bc3ac5aa1fd9e9532b08d9622530c2b5998b (commit)
via 8f2bbf55feb66a26d6b2c416fd902f488f9848ac (commit)
via 4a7f643283ece35b839644bc5aeadbdce6766c2d (commit)
via ac808e9e265fdafa0182e15da1f5acd1e94d797b (commit)
via 9b3c959af13111af1fa248c5010aa33ee7e307ee (commit)
via 1ec1ffb481e68fdd06d04fa38aaf6b4348669649 (commit)
via 6ad900eeff1c9e2c704dd5259565c28b8846aa37 (commit)
via 62bb1c4ddcb01d285edfdb04016d710597c708e7 (commit)
via 2a742e19fb8c128456c90b9ae679d2a4806313d3 (commit)
via 89fd37be8d063c6fce0fa731ab7f3ac9c45667ef (commit)
via 4760fba1fcc4d0fdb762b69cd6090002b9190be1 (commit)
via 5e9d101a16146d2bb60f128cba65083b72e04086 (commit)
via ad274a49ad5f36231b8997240272c89b4edf0c2f (commit)
via 6ff25a392e3d15ae5a9aa1ca11a4badeb95bf39d (commit)
via 0a85e0e55c24798e91bdbbd39bd1fbe97d72f3f8 (commit)
via bc5a59ac90aa09fc1d32212ab1b80428b46f9543 (commit)
via c6afb4411dea5f77e3f1ce923271f124583fa59e (commit)
via f969ebef7a0d51d33c9c2eb3f034ad25bda2f4f3 (commit)
via 552cbb67476914a81d0cdf226fc2806b1edf935e (commit)
via 385f6f102749b237bdab4854d7a30f38203a46cf (commit)
via 4921791213303bf926778b79873a30fdb334d033 (commit)
via ee17e979fcde48b59d91c74ac368244169065f3b (commit)
via 09b1a2f927483b407d70e98f5982f424cc872149 (commit)
via 0c19f89ff4a9838e93ed647ed34af6ffad3f420e (commit)
via cc9e3303451d440894ce918ae5f1e0d55b30f650 (commit)
via dcd93a56893257541fbe19fcd112f7fdda5cb7bb (commit)
via 1a235092e9050f116b87a1edc5b2b6095aacc9e8 (commit)
via 79a33db9ed37ba4715b35d1d9c74dcc550a50788 (commit)
via 44fe82eeedff8f69053c6f455134a256daab3340 (commit)
via 97953d08e3259263743138dcc81ef4332e585e39 (commit)
via 9859c1d73774b09bab6aee9dc8082428097aeae9 (commit)
via 521189be3a1dc89fc9010dfa9fa13a31ee233a38 (commit)
via cd2c2c8ec6cf8a335897176339c8150228be4a36 (commit)
via 779d9312bf84dcf7ce26bd1b1d6276179e026ad2 (commit)
via e283a05caf9dd0aefc7cd08a405dbc47510fbd43 (commit)
via 848cc407b16d4874dc5abec493248cd64db018c6 (commit)
via 5106746a3bcc359ba14fb569d6921b9cdf5b244a (commit)
via 6d750aafaabefdec3e2d3bb75343bc520b8ef6b5 (commit)
via 750c848d04cca9137a34383c6b44bb203e3d2a94 (commit)
via 9dbce1aa52c7342dc4fe4292983d33b826fc4217 (commit)
via 40bbfbc8f6fcfa08154b0c76ddbb41c2725f24f7 (commit)
via 0a94c9b7778c06dcadd0cf77ccf785afe53dc0b7 (commit)
via 0bd3e904e235bf19714dd6c39de6fd7810541d31 (commit)
via 555bf4c3d7d588de640091b4bf677c130d34d94d (commit)
via e4e606091def44d790b193fbcfe74623dd31ef43 (commit)
via 7491dd625f34f755d9764cf9e116cd8374be1d88 (commit)
via cabbe83773e3b4bc88f80f81cb4d370b0ef7a2f6 (commit)
via 1e2111996aa44a583fcdf0997a6aa170542f3ea2 (commit)
via fc03d665a777c47920a85bae41f3e359c61fa42e (commit)
via 2a9656fb9ac69e87273f14f4a6807986ee9739a8 (commit)
via a333e6c631fd02410295adfac58d8681c635f782 (commit)
via 3c7af17b0022c2404e22618bb46682991c7e6a3c (commit)
via 64dd4df0a88c7007cdc22249118fbc0d3d59cd70 (commit)
via 3d1ea31fe2461f28b7337c78623834eb987c73a5 (commit)
via 1f6f8905d7319519c6f2ebd5864eeb4d6d700d03 (commit)
via 388c202a9b3c95f3d8d1aa0c76803b1f398974f3 (commit)
via 09c24690d48f8b8b7239668216b8fed9bac179e0 (commit)
via 4d6c30931eee834bb63b11fabec9d84390a9f6b7 (commit)
via 368ceac8bfb49a0bd4c0a0c98fe624d623de9c6d (commit)
via 701cf211bb322f46cd0188b8adc2f8e9ad8ac8e6 (commit)
via eb1b9e524157d9c378ba0350fe3217d56dbc0665 (commit)
via aeec2dc1b9c511d17971ac63138576c37e7c5164 (commit)
via a50fe182f38a02d9cea3256e5908cfa1e1658e00 (commit)
via e5faeb5fa84b7218fde486347359504cf692510e (commit)
via 7bed043f816679ee3ee3c6d75202d47b1531b37f (commit)
via ffa68930da0e9e19f697f1345807108dca356386 (commit)
via 8b05639cdcea9e7fd65a50c1e62fb449a4526916 (commit)
via 91aa998226f1f91a232f2be59a53c9568c4ece77 (commit)
via bc66f6d0df259657efc39b87f33e68dd08062202 (commit)
via e99a3dd0a0c6e698a5b3108af017c76fa6d115ec (commit)
via 6317823834dced610691841ee7c082249fa1f8fe (commit)
via c99567a4b1d2f5cfe345239c74b981efc8a4d585 (commit)
via 1da05510732e259e76ef280d7b879fd237b746f3 (commit)
via ba3695664abb8b686c8fee6d25bd356b68cd5c7c (commit)
via 07414de00e7bbbafcb73166eb969ce1fd38105ee (commit)
via c00795e0f9d07c7f0d47f04098addd3144dce7d4 (commit)
via 4f3158323950fd4c5bbccfdfcb33b5bb48e01dbc (commit)
via e34d32533ce85b2ae710f6ab45ebd187cb3520b7 (commit)
via d72eca9cdec9acb880e136c1b908fb136c7e1957 (commit)
via 388434f8a4c3693e76b95dba9cb9d6ae183a6407 (commit)
via a3cd0126b46392c4c150d9e865ff82811e24006f (commit)
via 27e6119093723a1e46a239ec245a8b4b10677635 (commit)
via 79acb1197b7046a21034d5a1dadeb6b2efbf030a (commit)
via de49cb6406849be06c5452ca532bb1007fe5c509 (commit)
via 5b8a824054313bdecb8988b46e55cb2e94cb2d6c (commit)
via e0ffe686d622e43dfbf3e286252044d1fd20a6ed (commit)
via cf78a006585e1918376e7e5847102c4dc435be19 (commit)
via 217087aad01f35b863da3b69012d4b3904b35a7c (commit)
via 4b7b2f9a808352bb4ef1a71de3a46cdd79076050 (commit)
via 9bb9dbe6234769cbe525c24887eefe16ef1d7cee (commit)
via f6882b13b9264d0d64345a341a73c75ae6d9c535 (commit)
via 0d69610ffffd701cd3a6fb2645f9ff24b69cb3b7 (commit)
via e382b73b40ea40386202d1c91dfe57c8a4efc11f (commit)
via 8765402aa09dbbf42b7cf3e8449a633a850221c4 (commit)
via f0ad7b6307f49bb7bc4a45311e6f1b33b42dd885 (commit)
via f485d39ab6e394bc034b990f872572c3700d0312 (commit)
via a55716a04087917db71ff5b224b922d05ec6d6c6 (commit)
via 769bbb5edb0fe537e497ebe1a9fc3dfc130a5866 (commit)
via 0be5967e2ff6364d8d38ad297be99925dfe20cc4 (commit)
via d9421d93e3a65e6025281ece92706f7c2003ae77 (commit)
via e273dedf8a3875ddb241b796c19144d14819f5d8 (commit)
via fae9e0052b6f71b38e003bda562cc49ab4d4b9c7 (commit)
via 1bbf2ae947fdfb85b57fcd41a5c3f386bbadd221 (commit)
via aa76df71dcd756e7b233af47a6c936263813d90b (commit)
via 631366a33bb06d01547c0789edfadde7a17228bd (commit)
via e05c0b3ebb376233dd9648732fb5c992bb72e242 (commit)
via a60ee6bbefe447f7ce863641dd88cdb1d3f0766b (commit)
via d27b4c92e0aa23a8d39338377d705a75f78d1f3e (commit)
via a4bbade712c4a082d8ae95c9389021d27b74b382 (commit)
via 83c28cb3836d45e3e1abdc5a53c0d994148d1af2 (commit)
via ea6e8ea2e28a3c7b22ca2390f3509d791522959b (commit)
via c80274000aed9ab258b3e0b46ddb2c3212a7b877 (commit)
via 55b05119c73ef4858995efd2377c16e17f8a0f06 (commit)
via d7fd34a4bf69c07812461671ac8d8f0dde0ee440 (commit)
via 1bb3b6b661cc500b2a7cad2447347a23b7b1fadc (commit)
via d5a2bb8d02bb017405ac6f79e2eeffcb7fd23052 (commit)
via 9de5d287390383e072dbb2c4cbf9d60253893e41 (commit)
via 2887502a9e0084cb16c79aba684a65f9d4fda69a (commit)
via 0f4eb55137c6c0d2da2f14270a3c843fae14d187 (commit)
via 6fcb36d000e1da11d66baeb4b97278dd974d6a01 (commit)
via bca4daf3e3ea6538b6796e570daaeedb516b2dcb (commit)
via b7221c64cd88051434a3d254a0ec6a1fe07ddf6b (commit)
via d31f5bd476744ced094fc2aecb557b945586b056 (commit)
via bb6afeceea763fdfd012b8a2b6e9e4247f2d623d (commit)
via 931a65b517fd80cf03afbd8ff3c5c70a121f66e5 (commit)
via 54d482bf70ce2878423df57efd6bbb8138805914 (commit)
via 002b5ff189dde721ac10f21d83aadb0b9e641295 (commit)
via 4c38d52144ef9a80ee8edb705d98ce6baaf4c378 (commit)
via a4aa18ebf42a9f94818519a5805e7caf7bff61c7 (commit)
via 1ea439fadbf39dc980cff8c8848994bdbeeb1e9a (commit)
via d4d5c0ee659e6ecfb3e3384f2462a58559464f80 (commit)
via 2db3158111a1e1ae660fdb515f0d461e6798e27b (commit)
via 933430e40bb35135d741df5b9c8b944c5bf9be83 (commit)
via a0ac798d243153455ce1332a8ef3e1363b33273c (commit)
via 2443a6fc801159c879f4051f17c79b2e776b34bf (commit)
via 3886d1174c6552cc99c3d5fa2c727c3ee0bcc281 (commit)
via 3f1577b50df4375e51d94be2650ab2ae5e4d6698 (commit)
via 30276d77d28c951226859d2f1f452abfabdcf6f4 (commit)
via 2cecfb45e98d30493019d4664fc7ca320e377a2e (commit)
via f56d5006adee1ae27e38960d26e46b5fe488c965 (commit)
via fc075a50ee9a1d31c4851afc1f6df64c67951547 (commit)
via 6a2437122d5dddfc0ddfd923c5036331ca23fb12 (commit)
via f16d079d34dc61a5cea082328c3eb38beeea3d7c (commit)
via 45b03507d6ec3868648713f86e7789b15f78a367 (commit)
via 656b5d1cb045b35499ced064fae6bc1962f91ad6 (commit)
via 77c9f2b09e47184c42f56658dcd93c4eee36587a (commit)
from 4890e1207ce2c4fd0875c321e14c8931c6313e6b (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit 896dd15f4ac12826fa7e7dea909e310c9d1518b9
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 20:59:17 2013 -0800
[2440] noted some differences from BIND 9
commit 759ad9e32d88e86db2e1ff749cb258fcf48b7e34
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 20:18:03 2013 -0800
[2440] update documentation of RdataSet::create().
commit 231effe63004f65df8997eb6505c6df9259a30c5
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 20:03:10 2013 -0800
[2440] handle varying TTL cases.
the behavior was changed slightly: on second thought it seemed to make
more sense to adopt the smallest TTL. An existing test case was removed
because it's now covered in more comprehensive tests for this change.
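As a rough illustration of the smallest-TTL policy described above (a
minimal Python sketch with hypothetical names, not the actual C++
RdataSet::create() code):

    def merge_rrs(existing, added):
        """Merge (rdata, ttl) pairs, dropping duplicate RDATA and
        adopting the smallest TTL seen across both inputs."""
        rdatas = []
        min_ttl = None
        for rdata, ttl in list(existing) + list(added):
            if rdata not in rdatas:          # suppress duplicate RDATA
                rdatas.append(rdata)
            min_ttl = ttl if min_ttl is None else min(min_ttl, ttl)
        return rdatas, min_ttl

    # TTLs 3600 and 300 on the same RRset merge to the smaller value, 300.
    assert merge_rrs([("192.0.2.1", 3600)], [("192.0.2.2", 300)])[1] == 300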
commit e0bc8b2ac591202750d65388d825fccff620adab
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 18:58:38 2013 -0800
[2440] a bit of cleanup: unify 2 versions of RdataSet::create into a single one.
commit 769575c46f377163f257143f6328661d653c1281
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 15:49:31 2013 -0800
[2440] check RR type consistency in merge mode RdataSet::create.
commit 11da73b20b66ce4f9b848ace8554099d87116b5c
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 15:39:36 2013 -0800
[2440] handle duplicate RDATA in RdataSet create appropriately.
commit 75920555b24b381c9931e699350792858fd1fbdc
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 14:02:38 2013 -0800
[2440] refactoring: unify two versions of create().
commit 017315cee2a710d8cdcfabc4deae5108e5f83c49
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 13:55:14 2013 -0800
[2440] mostly completed merge version of create().
with a lot of duplicate code right now, and some corner cases are not yet
handled.
commit bc9b81968e1cda2586e0e82c2dfeb7188ae299f2
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Thu Feb 14 19:29:01 2013 -0800
[2440] merge version of RdataSet::create.
all basic cases are covered and tested. still some code duplication.
ignoring corner cases.
commit 4ef474eb389dc4bd7228d5c13a55db4a88de6509
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 20:40:18 2013 -0800
[2440] documentation update for extensions to RdataEncoder with merge support.
commit e99268ff3a5b190b99168f92a081f5994d3b092b
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Fri Feb 15 14:45:20 2013 -0800
[2440] update add methods of RdataEncoder to return bool depending on duplicate
This is necessary to extend RdataSet class so it calculates the correct
number of RDATA/RRSIGs excluding duplicates.
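A minimal sketch of that idea (plain Python with hypothetical names; the
real RdataEncoder is a C++ class under src/lib/datasrc/memory/): the add
call reports whether the data was actually stored, so the caller can
count RDATA/RRSIGs while excluding duplicates.

    class DedupAdder:
        """Toy stand-in for a duplicate-aware add method."""
        def __init__(self):
            self._seen = set()

        def add_rdata(self, rdata):
            """Return True if rdata was new (and stored), False if duplicate."""
            if rdata in self._seen:
                return False
            self._seen.add(rdata)
            return True

    adder = DedupAdder()
    new = [adder.add_rdata(r) for r in ("192.0.2.1", "192.0.2.1", "192.0.2.2")]
    assert sum(new) == 2   # the duplicate is not counted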
commit c8612ad245784bec63d3aff107c3765a5e922892
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Wed Feb 13 23:05:22 2013 -0800
[2440] suppress duplicate Rdata in merge. extend test cases more.
commit 42a5732b72152d692394dc1aefaefd6165e99771
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Wed Feb 13 22:21:20 2013 -0800
[2440] generalize the merge-start initialization using RdataReader.
commit 6580e209688b7f9c667d82b15c12425fd3d875c6
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Wed Feb 13 20:04:12 2013 -0800
[2440] update encode() considering old RRSIGs. start() still uses hardcoding.
commit 36ba2daf7d81708e59b2d01202dbe6be03981244
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Wed Feb 13 18:20:40 2013 -0800
[2440] extended RdataEncoder so it can begin with existing encoded data.
some test cases are hardcoded, not all cases are covered yet.
commit 016d4a4bcb6c23de3107df178a71f75e46dbdba0
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Mon Feb 11 20:38:09 2013 -0800
[2440] ignore duplicate RRSIG Rdata, too.
commit aa4361017c9850636abc61b9248a49cfdf253370
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Mon Feb 11 20:10:34 2013 -0800
[2440] suppress duplicate Rdata in RdataEncoder::addRdata().
also, as a cleanup, removed an unused class member variable.
commit 67adaa92a4419673d07fa76f2add0430d423624d
Author: JINMEI Tatuya <jinmei at isc.org>
Date: Mon Feb 11 15:37:43 2013 -0800
[2440] unrelated cleanup: removed unnecessary include
-----------------------------------------------------------------------
Summary of changes:
ChangeLog | 155 +-
configure.ac | 45 +-
doc/Makefile.am | 2 +-
doc/differences.txt | 32 +
doc/guide/bind10-guide.xml | 1675 +++++++++++++++-----
examples/configure.ac | 5 +-
examples/m4/ax_isc_rpath.m4 | 62 +-
src/bin/auth/auth_config.cc | 2 +-
src/bin/auth/auth_messages.mes | 9 +-
src/bin/auth/auth_srv.cc | 6 +-
src/bin/auth/b10-auth.xml | 6 +-
src/bin/auth/main.cc | 34 +-
src/bin/auth/query.cc | 13 +-
src/bin/auth/tests/config_unittest.cc | 2 +-
src/bin/auth/tests/query_unittest.cc | 82 +-
src/bin/bind10/.gitignore | 3 +-
src/bin/bind10/Makefile.am | 32 +-
src/bin/bind10/README | 5 +-
src/bin/bind10/{bind10.xml => b10-init.xml} | 74 +-
src/bin/bind10/bind10.in | 11 +
src/bin/bind10/bind10.xml | 461 +-----
src/bin/bind10/creatorapi.txt | 28 +-
src/bin/bind10/{bind10_src.py.in => init.py.in} | 95 +-
src/bin/bind10/{bob.spec => init.spec} | 6 +-
.../{bind10_messages.mes => init_messages.mes} | 110 +-
src/bin/bind10/run_bind10.sh.in | 2 +-
src/bin/bind10/tests/Makefile.am | 2 +-
src/bin/bind10/tests/args_test.py | 100 +-
.../tests/{bind10_test.py.in => init_test.py.in} | 1078 ++++++-------
src/bin/bindctl/bindcmd.py | 177 ++-
src/bin/bindctl/bindctl.xml | 2 +-
src/bin/bindctl/bindctl_main.py.in | 2 +-
src/bin/bindctl/command_sets.py | 24 +-
src/bin/bindctl/run_bindctl.sh.in | 2 +-
src/bin/bindctl/tests/bindctl_test.py | 125 +-
src/bin/cfgmgr/b10-cfgmgr.py.in | 2 +-
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in | 8 +-
src/bin/cmdctl/Makefile.am | 17 +-
src/bin/cmdctl/b10-cmdctl.xml | 4 +-
src/bin/cmdctl/cmdctl.py.in | 57 +-
src/bin/cmdctl/cmdctl_messages.mes | 10 +
src/bin/cmdctl/tests/Makefile.am | 2 +-
src/bin/cmdctl/tests/cmdctl_test.py | 160 +-
src/bin/dbutil/b10-dbutil.xml | 2 +-
src/bin/dbutil/dbutil.py.in | 8 +-
src/bin/dbutil/tests/dbutil_test.sh.in | 102 +-
src/bin/dbutil/tests/testdata/Makefile.am | 1 +
.../tests/testdata/{v2_1.sqlite3 => v2_2.sqlite3} | Bin 15360 -> 16384 bytes
src/bin/ddns/b10-ddns.xml | 8 +-
src/bin/ddns/ddns.py.in | 8 +-
src/bin/ddns/ddns_messages.mes | 4 +-
src/bin/ddns/tests/ddns_test.py | 34 +-
src/bin/dhcp4/Makefile.am | 10 +-
src/bin/dhcp4/config_parser.cc | 151 +-
src/bin/dhcp4/ctrl_dhcp4_srv.cc | 70 +-
src/bin/dhcp4/ctrl_dhcp4_srv.h | 23 +-
src/bin/dhcp4/dhcp4.spec | 8 +-
src/bin/dhcp4/dhcp4_srv.cc | 170 +-
src/bin/dhcp4/dhcp4_srv.h | 27 +-
src/bin/dhcp4/tests/Makefile.am | 10 +-
src/bin/dhcp4/tests/config_parser_unittest.cc | 494 +++++-
src/bin/dhcp4/tests/dhcp4_srv_unittest.cc | 198 ++-
src/bin/dhcp4/tests/dhcp4_test.py | 2 +-
src/bin/dhcp6/Makefile.am | 10 +-
src/bin/dhcp6/config_parser.cc | 145 +-
src/bin/dhcp6/ctrl_dhcp6_srv.cc | 79 +-
src/bin/dhcp6/ctrl_dhcp6_srv.h | 23 +-
src/bin/dhcp6/dhcp6.spec | 8 +-
src/bin/dhcp6/dhcp6_srv.cc | 102 +-
src/bin/dhcp6/dhcp6_srv.h | 10 -
src/bin/dhcp6/tests/Makefile.am | 10 +-
src/bin/dhcp6/tests/config_parser_unittest.cc | 537 ++++++-
src/bin/dhcp6/tests/dhcp6_srv_unittest.cc | 3 +-
src/bin/dhcp6/tests/dhcp6_test.py | 4 +-
src/bin/loadzone/loadzone.py.in | 57 +-
src/bin/loadzone/loadzone_messages.mes | 11 +-
src/bin/loadzone/tests/correct/example.db | 14 +-
src/bin/loadzone/tests/correct/include.db | 4 +-
src/bin/loadzone/tests/correct/mix1.db | 4 +-
src/bin/loadzone/tests/correct/mix2.db | 4 +-
src/bin/loadzone/tests/correct/ttl1.db | 4 +-
src/bin/loadzone/tests/correct/ttl2.db | 4 +-
src/bin/loadzone/tests/correct/ttlext.db | 4 +-
src/bin/loadzone/tests/loadzone_test.py | 47 +-
src/bin/msgq/msgq.py.in | 159 +-
src/bin/msgq/msgq_messages.mes | 74 +-
src/bin/msgq/tests/msgq_test.py | 179 ++-
src/bin/resolver/b10-resolver.xml | 6 +-
src/bin/resolver/resolver.cc | 13 +-
src/bin/resolver/resolver_messages.mes | 13 +-
src/bin/sockcreator/tests/sockcreator_tests.cc | 9 +-
src/bin/stats/b10-stats-httpd.xml | 6 +-
src/bin/stats/b10-stats.xml | 10 +-
src/bin/stats/stats.py.in | 11 +-
src/bin/stats/stats.spec | 2 +-
src/bin/stats/stats_httpd.py.in | 58 +-
src/bin/stats/stats_httpd_messages.mes | 45 +-
src/bin/stats/stats_messages.mes | 3 +
src/bin/stats/tests/b10-stats-httpd_test.py | 10 +-
src/bin/stats/tests/b10-stats_test.py | 66 +-
src/bin/stats/tests/test_utils.py | 16 +-
src/bin/stats/tests/testdata/b10-config.db | 2 +-
src/bin/sysinfo/run_sysinfo.sh.in | 14 +-
src/bin/tests/process_rename_test.py.in | 2 +-
src/bin/xfrin/b10-xfrin.xml | 8 +-
src/bin/xfrin/tests/xfrin_test.py | 381 +++--
src/bin/xfrin/xfrin.py.in | 138 +-
src/bin/xfrin/xfrin_messages.mes | 30 +-
src/bin/xfrout/b10-xfrout.xml | 6 +-
src/bin/xfrout/tests/xfrout_test.py.in | 124 +-
src/bin/xfrout/xfrout.py.in | 71 +-
src/bin/xfrout/xfrout_messages.mes | 5 +-
src/bin/zonemgr/b10-zonemgr.xml | 8 +-
src/bin/zonemgr/zonemgr.py.in | 4 +-
src/bin/zonemgr/zonemgr_messages.mes | 4 +-
src/lib/cc/cc_messages.mes | 4 +
src/lib/cc/data.cc | 10 +-
src/lib/cc/session.cc | 1 +
src/lib/config/tests/ccsession_unittests.cc | 2 +
src/lib/datasrc/Makefile.am | 6 +-
src/lib/datasrc/client.h | 3 +-
src/lib/datasrc/database.cc | 21 +-
src/lib/datasrc/datasrc_messages.mes | 24 +-
src/lib/datasrc/memory/memory_client.h | 2 +-
src/lib/datasrc/memory/rdata_serialization.cc | 214 ++-
src/lib/datasrc/memory/rdata_serialization.h | 53 +-
src/lib/datasrc/memory/rdataset.cc | 85 +-
src/lib/datasrc/memory/rdataset.h | 32 +-
src/lib/datasrc/memory/treenode_rrset.cc | 7 +-
src/lib/datasrc/memory/treenode_rrset.h | 29 +-
src/lib/datasrc/memory/zone_data.cc | 30 +
src/lib/datasrc/memory/zone_data.h | 57 +-
src/lib/datasrc/memory/zone_data_loader.h | 2 +-
src/lib/datasrc/memory/zone_data_updater.cc | 11 +
src/lib/datasrc/memory/zone_finder.cc | 80 +-
src/lib/datasrc/memory/zone_finder.h | 16 +-
src/lib/datasrc/memory_datasrc.cc | 3 +-
src/lib/datasrc/rrset_collection_base.cc | 1 +
src/lib/datasrc/rrset_collection_base.h | 54 +-
src/lib/datasrc/sqlite3_accessor.cc | 5 +-
src/lib/datasrc/sqlite3_accessor_link.cc | 8 +-
src/lib/datasrc/tests/client_list_unittest.cc | 2 +-
src/lib/datasrc/tests/database_unittest.cc | 293 +++-
src/lib/datasrc/tests/faked_nsec3.h | 2 +-
.../datasrc/tests/master_loader_callbacks_test.cc | 2 +-
.../tests/memory/rdata_serialization_unittest.cc | 298 +++-
src/lib/datasrc/tests/memory/rdataset_unittest.cc | 406 ++++-
.../tests/memory/treenode_rrset_unittest.cc | 51 +-
.../tests/memory/zone_data_loader_unittest.cc | 19 +-
src/lib/datasrc/tests/memory/zone_data_unittest.cc | 28 +-
.../tests/memory/zone_data_updater_unittest.cc | 19 +-
.../datasrc/tests/memory/zone_finder_unittest.cc | 307 +++-
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 4 +-
.../datasrc/tests/zone_finder_context_unittest.cc | 2 +-
src/lib/datasrc/tests/zone_loader_unittest.cc | 19 +-
src/lib/datasrc/zone.h | 781 +--------
src/lib/datasrc/zone_finder.cc | 95 +-
src/lib/datasrc/{zone.h => zone_finder.h} | 424 +----
src/lib/datasrc/zone_finder_context.cc | 2 +-
src/lib/datasrc/{iterator.h => zone_iterator.h} | 0
src/lib/datasrc/zone_loader.cc | 5 +-
src/lib/datasrc/zonetable.h | 2 +-
src/lib/dhcp/Makefile.am | 1 +
src/lib/dhcp/libdhcp++.cc | 75 +-
src/lib/dhcp/libdhcp++.h | 10 -
src/lib/dhcp/option.cc | 62 +-
src/lib/dhcp/option.h | 24 +-
src/lib/dhcp/option4_addrlst.cc | 2 +-
src/lib/dhcp/option4_addrlst.h | 10 +-
src/lib/dhcp/option_custom.cc | 25 +-
src/lib/dhcp/option_custom.h | 22 +-
src/lib/dhcp/option_definition.cc | 53 +-
src/lib/dhcp/option_definition.h | 66 +-
src/lib/dhcp/option_int_array.h | 24 +
src/lib/{dhcpsrv => dhcp}/option_space.cc | 2 +-
src/lib/{dhcpsrv => dhcp}/option_space.h | 0
src/lib/dhcp/pkt4.cc | 11 +-
src/lib/dhcp/pkt6.cc | 6 +-
src/lib/dhcp/std_option_defs.h | 244 +--
src/lib/dhcp/tests/Makefile.am | 1 +
src/lib/dhcp/tests/libdhcp++_unittest.cc | 36 +-
src/lib/dhcp/tests/option4_addrlst_unittest.cc | 4 +-
src/lib/dhcp/tests/option_definition_unittest.cc | 51 +-
src/lib/dhcp/tests/option_int_array_unittest.cc | 169 +-
.../tests/option_space_unittest.cc | 2 +-
src/lib/dhcp/tests/option_unittest.cc | 14 +-
src/lib/dhcpsrv/Makefile.am | 1 -
src/lib/dhcpsrv/alloc_engine.cc | 2 +-
src/lib/dhcpsrv/alloc_engine.h | 4 +-
src/lib/dhcpsrv/cfgmgr.h | 2 +-
src/lib/dhcpsrv/dbaccess_parser.cc | 2 +-
src/lib/dhcpsrv/lease_mgr.cc | 13 +-
src/lib/dhcpsrv/lease_mgr.h | 14 +-
src/lib/dhcpsrv/mysql_lease_mgr.cc | 25 +-
src/lib/dhcpsrv/option_space_container.h | 2 +-
src/lib/dhcpsrv/subnet.cc | 13 +-
src/lib/dhcpsrv/subnet.h | 4 +-
src/lib/dhcpsrv/tests/Makefile.am | 1 -
src/lib/dhcpsrv/tests/dbaccess_parser_unittest.cc | 4 +-
src/lib/dns/Makefile.am | 4 +
src/lib/dns/gen-rdatacode.py.in | 124 +-
src/lib/dns/name.cc | 1 -
src/lib/dns/python/opcode_python.cc | 140 --
src/lib/dns/python/pydnspp.cc | 230 ++-
src/lib/dns/python/rcode_python.cc | 146 --
src/lib/dns/python/rrclass_python.cc | 43 -
src/lib/dns/python/rrset_collection_python_inc.cc | 39 +-
src/lib/dns/python/rrtype_python.cc | 144 --
src/lib/dns/python/tests/edns_python_test.py | 6 +-
src/lib/dns/python/tests/message_python_test.py | 34 +-
.../python/tests/messagerenderer_python_test.py | 8 +-
src/lib/dns/python/tests/nsec3hash_python_test.py | 46 +-
src/lib/dns/python/tests/opcode_python_test.py | 82 +-
src/lib/dns/python/tests/rcode_python_test.py | 60 +-
src/lib/dns/python/tests/rrclass_python_test.py | 20 +-
.../python/tests/rrset_collection_python_test.py | 46 +-
src/lib/dns/python/tests/rrset_python_test.py | 8 +-
src/lib/dns/python/tests/rrtype_python_test.py | 44 +-
src/lib/dns/python/tests/tsig_python_test.py | 36 +-
src/lib/dns/python/tests/tsigerror_python_test.py | 26 +-
.../dns/python/tests/zone_checker_python_test.py | 61 +-
src/lib/dns/rdata/generic/cname_5.cc | 59 +-
src/lib/dns/rdata/generic/dname_39.cc | 59 +-
src/lib/dns/rdata/generic/mx_15.cc | 82 +-
src/lib/dns/rdata/generic/ns_2.cc | 59 +-
src/lib/dns/rdata/generic/ptr_12.cc | 59 +-
src/lib/dns/rdata/in_1/srv_33.cc | 129 +-
src/lib/dns/rdata/template.h | 4 +
src/lib/dns/rrclass-placeholder.h | 14 -
src/lib/dns/rrset_collection_base.h | 38 +-
src/lib/dns/rrtype-placeholder.h | 30 -
src/lib/dns/tests/rdata_cname_unittest.cc | 36 +-
src/lib/dns/tests/rdata_dname_unittest.cc | 36 +-
src/lib/dns/tests/rdata_mx_unittest.cc | 35 +-
src/lib/dns/tests/rdata_ns_unittest.cc | 42 +-
src/lib/dns/tests/rdata_ptr_unittest.cc | 40 +-
src/lib/dns/tests/rdata_srv_unittest.cc | 45 +-
src/lib/dns/tests/rrclass_unittest.cc | 25 +
src/lib/dns/tests/rrset_unittest.cc | 4 +-
src/lib/dns/tests/rrtype_unittest.cc | 53 +
src/lib/dns/tests/zone_checker_unittest.cc | 20 +-
.../tests/nameserver_address_store_unittest.cc | 2 +-
src/lib/nsas/tests/nsas_test.h | 4 +-
src/lib/python/bind10_config.py.in | 6 +-
src/lib/python/isc/__init__.py | 10 +-
src/lib/python/isc/bind10/Makefile.am | 5 +
src/lib/python/isc/bind10/component.py | 57 +-
src/lib/python/isc/bind10/sockcreator.py | 6 +-
src/lib/python/isc/bind10/socket_cache.py | 17 +-
src/lib/python/isc/bind10/special_component.py | 52 +-
src/lib/python/isc/bind10/tests/component_test.py | 39 +-
src/lib/python/isc/cc/Makefile.am | 15 +-
src/lib/python/isc/{server_common => cc}/logger.py | 12 +-
.../lib/python/isc/cc/pycc_messages.mes | 15 +-
src/lib/python/isc/cc/session.py | 5 +
src/lib/python/isc/cc/tests/message_test.py | 15 +-
src/lib/python/isc/cc/tests/session_test.py | 116 +-
src/lib/python/isc/config/cfgmgr.py | 83 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 13 +
src/lib/python/isc/config/config_data.py | 2 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 122 +-
.../python/isc/config/tests/config_data_test.py | 27 +-
.../python/isc/config/tests/module_spec_test.py | 2 +-
src/lib/python/isc/datasrc/client_python.cc | 2 +-
src/lib/python/isc/datasrc/finder_python.cc | 2 +-
src/lib/python/isc/datasrc/iterator_python.cc | 2 +-
src/lib/python/isc/datasrc/sqlite3_ds.py | 4 +-
.../python/isc/datasrc/tests/clientlist_test.py | 10 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 140 +-
.../python/isc/datasrc/tests/zone_loader_test.py | 5 +-
src/lib/python/isc/datasrc/updater_inc.cc | 51 +-
src/lib/python/isc/datasrc/updater_python.cc | 2 +-
src/lib/python/isc/ddns/libddns_messages.mes | 4 +-
src/lib/python/isc/ddns/session.py | 118 +-
src/lib/python/isc/ddns/tests/session_tests.py | 619 ++++----
src/lib/python/isc/ddns/tests/zone_config_tests.py | 14 +-
src/lib/python/isc/log_messages/Makefile.am | 6 +-
src/lib/python/isc/log_messages/bind10_messages.py | 1 -
src/lib/python/isc/log_messages/init_messages.py | 1 +
src/lib/python/isc/log_messages/pycc_messages.py | 1 +
src/lib/python/isc/notify/notify_out.py | 18 +-
src/lib/python/isc/notify/tests/notify_out_test.py | 18 +-
src/lib/python/isc/server_common/dns_tcp.py | 2 +-
.../isc/server_common/server_common_messages.mes | 2 +-
.../python/isc/statistics/tests/counters_test.py | 8 +-
src/lib/python/isc/sysinfo/sysinfo.py | 15 +-
src/lib/python/isc/testutils/rrset_utils.py | 28 +-
src/lib/python/isc/xfrin/diff.py | 23 +-
src/lib/python/isc/xfrin/tests/diff_tests.py | 105 +-
src/lib/resolve/recursive_query.cc | 2 +-
src/lib/resolve/resolve_messages.mes | 2 +-
.../resolve/tests/response_classifier_unittest.cc | 4 +-
src/lib/server_common/portconfig.cc | 2 +-
src/lib/server_common/portconfig.h | 7 +-
src/lib/server_common/socket_request.cc | 36 +-
src/lib/server_common/tests/portconfig_unittest.cc | 4 +-
.../server_common/tests/socket_requestor_test.cc | 8 +-
src/lib/util/unittests/fork.cc | 10 +-
src/lib/util/unittests/fork.h | 4 +-
.../configurations/auth/auth_badzone.config.orig | 4 +-
.../configurations/auth/auth_basic.config.orig | 4 +-
.../configurations/bindctl/bindctl.config.orig | 4 +-
.../configurations/bindctl_commands.config.orig | 4 +-
tests/lettuce/configurations/ddns/ddns.config.orig | 4 +-
.../lettuce/configurations/ddns/noddns.config.orig | 4 +-
tests/lettuce/configurations/default.config | 2 +-
.../lettuce/configurations/example.org.config.orig | 4 +-
.../configurations/example.org.inmem.config | 4 +-
tests/lettuce/configurations/example2.org.config | 4 +-
.../inmemory_over_sqlite3/secondary.conf | 4 +-
.../configurations/ixfr-out/testset1-config.db | 4 +-
.../multi_instance/multi_auth.config.orig | 4 +-
tests/lettuce/configurations/no_db_file.config | 4 +-
.../lettuce/configurations/nsec3/nsec3_auth.config | 4 +-
.../resolver/resolver_basic.config.orig | 32 +-
.../lettuce/configurations/xfrin/inmem_slave.conf | 4 +-
.../xfrin/retransfer_master.conf.orig | 4 +-
....conf.orig => retransfer_master_nons.conf.orig} | 8 +-
.../xfrin/retransfer_slave.conf.orig | 4 +-
.../xfrin/retransfer_slave_notify.conf | 4 +-
tests/lettuce/data/commands/bad_command | 6 +-
...xample.org.sqlite3 => example.org-nons.sqlite3} | Bin 15360 -> 15360 bytes
tests/lettuce/features/bindctl_commands.feature | 24 +-
tests/lettuce/features/ddns_system.feature | 4 +-
tests/lettuce/features/default.feature | 2 +-
tests/lettuce/features/multi_instance.feature | 10 +-
tests/lettuce/features/stats_httpd.feature | 16 +
tests/lettuce/features/terrain/bind10_control.py | 20 +-
tests/lettuce/features/terrain/http.py | 41 +
tests/lettuce/features/terrain/steps.py | 2 +-
tests/lettuce/features/terrain/terrain.py | 8 +-
tests/lettuce/features/xfrin_bind10.feature | 62 +-
.../lettuce/features/xfrin_notify_handling.feature | 2 +-
tests/system/Makefile.am | 2 +-
tests/system/bindctl/tests.sh | 16 +-
tests/system/glue/nsx1/b10-config.db.in | 2 +-
tests/system/ixfr/b10-config.db.in | 2 +-
tools/query_cmp/src/lib/handledns.py | 2 +-
338 files changed, 10944 insertions(+), 6655 deletions(-)
create mode 100644 doc/differences.txt
copy src/bin/bind10/{bind10.xml => b10-init.xml} (87%)
create mode 100755 src/bin/bind10/bind10.in
rename src/bin/bind10/{bind10_src.py.in => init.py.in} (95%)
rename src/bin/bind10/{bob.spec => init.spec} (94%)
rename src/bin/bind10/{bind10_messages.mes => init_messages.mes} (78%)
rename src/bin/bind10/tests/{bind10_test.py.in => init_test.py.in} (70%)
copy src/bin/dbutil/tests/testdata/{v2_1.sqlite3 => v2_2.sqlite3} (91%)
mode change 100644 => 100755 src/bin/stats/stats_httpd.py.in
copy src/lib/datasrc/{zone.h => zone_finder.h} (66%)
rename src/lib/datasrc/{iterator.h => zone_iterator.h} (100%)
rename src/lib/{dhcpsrv => dhcp}/option_space.cc (98%)
rename src/lib/{dhcpsrv => dhcp}/option_space.h (100%)
rename src/lib/{dhcpsrv => dhcp}/tests/option_space_unittest.cc (99%)
copy src/lib/python/isc/{server_common => cc}/logger.py (69%)
copy tests/system/glue/clean.sh => src/lib/python/isc/cc/pycc_messages.mes (66%)
mode change 100755 => 100644
delete mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
create mode 100644 src/lib/python/isc/log_messages/init_messages.py
create mode 100644 src/lib/python/isc/log_messages/pycc_messages.py
copy tests/lettuce/configurations/xfrin/{retransfer_master.conf.orig => retransfer_master_nons.conf.orig} (87%)
copy tests/lettuce/data/{example.org.sqlite3 => example.org-nons.sqlite3} (89%)
create mode 100644 tests/lettuce/features/stats_httpd.feature
create mode 100644 tests/lettuce/features/terrain/http.py
-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 2297f84..ff55f8f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,17 +1,146 @@
-553. [func] stephen
+571. [build] jinmei
+ The ./configure script can now handle output from python-config
+ --ldflags that contains a space after -L switches. This fixes
+ failures reported on some Solaris environments.
+ (Trac #2661, git e6f86f2f5eec8e6003c13d36804a767a840d96d6)
+
+570. [bug] tmark, marcin, tomek
+ b10-dhcp4: Address renewal now works properly for DHCPv4 clients
+ that do not send client ID.
+ (Trac #2702, git daf2abe68ce9c111334a15c14e440730f3a085e2)
+
+569. [bug] tomek
+ b10-dhcp4: Fix bug whereby a DHCP packet without a client ID
+ could crash the MySQL lease database backend.
+ (Trac #2697, git b5e2be95d21ed750ad7cf5e15de2058aa8bc45f4)
+
+568. [func] muks
+ Various message IDs have been renamed to remove the word 'ERROR'
+ from them when they are not logged at ERROR severity level.
+ (Trac #2672, git 660a0d164feaf055677f375977f7ed327ead893e)
+
+567. [doc] marcin, stephen, tomek
+ Update DHCP sections of the BIND 10 guide.
+ (Trac #2657, git 1d0c2004865d1bf322bf78d13630d992e39179fd)
+
+566. [func]* jinmei
+ libdns++/Python isc.dns: In Python isc.dns, function style
+ constants for RRType, RRClass, Rcode and Opcode were deprecated
+ and replaced with straightforward object constants, e.g., from
+ RRType.AAAA() to RRType.AAAA. This is a backward incompatible
+ change (see the Trac ticket for a conversion script if needed).
+ Also, these constants are now more consistent between C++
+ and Python, and RRType constants for all currently standardized
+ types are now supported (even if Rdata for these are not yet
+ available).
+ (Trac #1866 and #2409, git e5005185351cf73d4a611407c2cfcd163f80e428)
+
+565. [func]* jelte
+ The main initializer script (formerly known as either 'bind10',
+ 'boss', or 'bob') has been renamed to b10-init (and Init in
+ configuration). Configuring which components are run is henceforth
+ done through '/Init/components', and the sbin/bind10 script is now
+ simply a shellscript that runs b10-init. Existing configuration is
+ automatically updated. NOTE: once configuration with this update
+ has been saved (by committing any new change with bindctl), you
+ cannot run older versions of BIND 10 anymore with this configuration.
+ (Trac #1901, git bae3798603affdb276f370c1ac6b33b011a5ed4f)
+
+564. [func] muks
+ libdns++: the CNAME, DNAME, MX, NS, PTR and SRV Rdata classes now
+ use the generic lexer in constructors from text. This means that
+ the name fields in such RRs in a zone file can now be non-absolute
+ (the origin name in that context will be used), e.g., when loaded
+ by b10-loadzone. One additional change to the libdns++ API is that
+ the existing string constructors for these Rdata classes also use
+ the generic lexer, and they now expect an absolute name (with the
+ trailing '.') in the name fields.
+ (Trac #2390, git a01569277cda3f78b1171bbf79f15ecf502e81e2)
+ (Trac #2656, git 5a0d055137287f81e23fbeedd35236fee274596d)
+
+563. [build] jinmei
+ Added --disable-rpath configure option to avoid embedding library
+ paths to binaries. Patch from Adam Tkac.
+ (Trac #2667, git 1c50c5a6ee7e9675e3ab154f2c7f975ef519fca2)
+
+562. [func]* vorner
+ The b10-xfrin now performs a basic sanity check on a just-received
+ zone. It'll reject severely broken zones (such as missing NS
+ records).
+ (Trac #2439, git 44699b4b18162581cd1dd39be5fb76ca536012e6)
+
+561. [bug] kambe, jelte
+ b10-stats-httpd no longer dumps request information to the console,
+ but uses the bind10 logging system. Additionally, the logging
+ identifiers have been changed from STATHTTPD_* to STATSHTTPD_*
+ (Trac #1897, git 93716b025a4755a8a2cbf250a9e4187741dbc9bb)
+
+560. [bug] jinmei
+ b10-auth now sets the TTL of SOA RR for negative responses to
+ the minimum of the RR TTL and the minimum TTL of the SOA RDATA
+ as specified in RFC2308; previously the RR TTL was always used.
+ The ZoneFinder class was extended partly for implementing this
+ and partly for allowing further optimization.
+ (Trac #2309 and #2635, git ee17e979fcde48b59d91c74ac368244169065f3b)
+
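As a worked example of the rule in entry 560 (hypothetical values):

    # SOA RR TTL = 3600s, SOA MINIMUM field = 300s
    negative_ttl = min(3600, 300)   # -> 300s used for the negative response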
+559. [bug] jelte
+ b10-cmdctl no longer aborts on basic file issues with its https
+ certificate or private key file. It performs additional checks, and
+ provides better error logs if these fail. Additionally, bindctl
+ provides a better error report if it is unable to connect over
+ https connection. This issue could occur if BIND 10 was installed
+ with root privileges but then started as a normal user.
+ (Trac #2595, git 09b1a2f927483b407d70e98f5982f424cc872149)
+
+558. [func] marcin
+ b10-dhcp4: the server now adds configured options to its
+ responses to a client when the client requests them.
+ A few basic options: Routers, Domain Name, Domain
+ Name Servers and Subnet Mask are added regardless of
+ whether the client requested them.
+ (Trac #2591, git aeec2dc1b9c511d17971ac63138576c37e7c5164)
+
+557. [doc] stephen
+ Update DHCP sections of the BIND 10 guide.
+ (Trac #2642, git e5faeb5fa84b7218fde486347359504cf692510e)
+
+556. [bug] marcin
+ Fixed DHCP server configuration whereby the servers did not
+ receive the configuration stored in the database at startup.
+ Also, the configuration handler function now uses the full
+ configuration instead of a partial one to configure the server.
+ This guarantees that
+ dependencies between various configuration parameters are
+ fulfilled.
+ (Trac #2637, git 91aa998226f1f91a232f2be59a53c9568c4ece77)
+
+555. [func] marcin
+ The encapsulated option space name can be specified for
+ a DHCP option. It comprises sub-options being sent within
+ an option that encapsulates this option space.
+ (Trac #2314, git 27e6119093723a1e46a239ec245a8b4b10677635)
+
+554. [func] jinmei
+ b10-loadzone: improved completion log message and intermediate
+ reports: It now logs the precise number of loaded RRs on
+ completion, and intermediate reports show additional information
+ such as the estimated progress in percentage and estimated time
+ to complete.
+ (Trac #2574, git 5b8a824054313bdecb8988b46e55cb2e94cb2d6c)
+
+553. [func] stephen
Values of the parameters to access the DHCP server lease database
can now be set through the BIND 10 configuration mechanism.
(Trac #2559, git 6c6f405188cc02d2358e114c33daff58edabd52a)
-552. [bug] shane
- Build on Raspberry PI.
+552. [bug] shane
+ Build on Raspberry PI.
The main issue was use of char for reading from input streams,
which is incorrect, as EOF is returned as an int -1, which would
then get cast into a char -1.
A number of other minor issues were also fixed.
(Trac #2571, git 525333e187cc4bbbbde288105c9582c1024caa4a)
-551. [bug] shane
+551. [bug] shane
Kill msgq if we cannot connect to it on startup.
When the boss process was unable to connect to the msgq, it would
exit. However, it would leave the msgq process running. This has
@@ -75,8 +204,8 @@
543. [func]* jelte
When calling getFullConfig() as a module, the configuration is now
returned as properly-structured JSON. Previously, the structure had
- been flattened, with all data being labelled by fully-qualified element
- names.
+ been flattened, with all data being labelled by fully-qualified
+ element names.
(Trac #2619, git bed3c88c25ea8f7e951317775e99ebce3340ca22)
542. [func] marcin
@@ -140,7 +269,7 @@
compile-time option --enable-debug.
(Trac #1081, git db55f102b30e76b72b134cbd77bd183cd01f95c0)
-534. [func]* vorner
+534. [func]* vorner
The b10-msgq now uses the same logging format as the rest
of the system. However, it still doesn't obey the common
configuration, as due to technical issues it is not able
@@ -2737,7 +2866,7 @@ bind10-devel-20110224 released on February 24, 2011
(Trac #496, git b9296ca023cc9e76cda48a7eeebb0119166592c5)
160. [func] jelte
- Updated the resolver to take 3 different timeout values;
+ Updated the resolver to take 3 different timeout values;
timeout_query for outstanding queries we sent while resolving
timeout_client for sending an answer back to the client
timeout_lookup for stopping the resolving
@@ -2916,7 +3045,7 @@ bind10-devel-20110120 released on January 20, 2011
(Trac #226, svn r3989)
136. [bug] jelte
- bindctl (and the configuration manager in general) now no longer
+ bindctl (and the configuration manager in general) now no longer
accepts 'unknown' data; i.e. data for modules that it does not know
about, or configuration items that are not specified in the .spec
files.
@@ -3158,7 +3287,7 @@ bind10-devel-20100917 released on September 17, 2010
(Trac #342, svn r2949)
94. [bug] jelte
- bin/xfrout: Fixed a problem in xfrout where only 2 or 3 RRs
+ bin/xfrout: Fixed a problem in xfrout where only 2 or 3 RRs
were used per DNS message in the xfrout stream.
(Trac #334, r2931)
@@ -3292,7 +3421,7 @@ bind10-devel-20100812 released on August 12, 2010
module. (Trac #275, r2459)
73. [bug] jelte
- Fixed a bug where in bindctl, locally changed settings were
+ Fixed a bug where in bindctl, locally changed settings were
reset when the list of running modules is updated. (Trac #285,
r2452)
@@ -3303,11 +3432,11 @@ bind10-devel-20100812 released on August 12, 2010
known such platform. (Trac #148, r2427)
71. [func] each
- Add "-a" (address) option to bind10 to specify an address for
+ Add "-a" (address) option to bind10 to specify an address for
the auth server to listen on.
70. [func] each
- Added a hot-spot cache to libdatasrc to speed up access to
+ Added a hot-spot cache to libdatasrc to speed up access to
repeatedly-queried data and reduce the number of queries to
the underlying database; this should substantially improve
performance. Also added a "-n" ("no cache") option to
diff --git a/configure.ac b/configure.ac
index d8bbef5..dcef042 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,9 +2,15 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10, 20121219, bind10-dev at isc.org)
+AC_INIT(bind10, 20130205, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
-AM_INIT_AUTOMAKE([foreign])
+# serial-tests is not available in automake version before 1.13. In
+# automake 1.13 and higher, AM_PROG_INSTALL is undefined, so we'll check
+# that and conditionally use serial-tests.
+AM_INIT_AUTOMAKE(
+ [foreign]
+ m4_ifndef([AM_PROG_INSTALL], [serial-tests])
+)
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])dnl be backward compatible
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4macros])
@@ -232,7 +238,22 @@ AM_CONDITIONAL(SET_ENV_LIBRARY_PATH, test $SET_ENV_LIBRARY_PATH = yes)
AC_SUBST(SET_ENV_LIBRARY_PATH)
AC_SUBST(ENV_LIBRARY_PATH)
-m4_define([_AM_PYTHON_INTERPRETER_LIST], [python python3.2 python3.1 python3])
+# Our experiments have shown Solaris 10 has broken support for the
+# IPV6_USE_MIN_MTU socket option for getsockopt(); it doesn't return the value
+# previously set via setsockopt(). We know it doesn't happen on one instance
+# on Solaris 11, but we don't know whether it happens for any Solaris 10
+# implementations or for earlier versions of Solaris. In any case, at the
+# moment this matters for only one unittest case, so we'll simply disable
+# the affected test using the following definition with the specific hardcoding
+# of that version of Solaris.
+case "$host" in
+*-solaris2.10)
+ AC_DEFINE([HAVE_BROKEN_GET_IPV6_USE_MIN_MTU], [1],
+ [Define to 1 if getsockopt(IPV6_USE_MIN_MTU) does not work])
+ ;;
+esac
+
+m4_define([_AM_PYTHON_INTERPRETER_LIST], [python python3.3 python3.2 python3.1 python3])
AC_ARG_WITH([pythonpath],
AC_HELP_STRING([--with-pythonpath=PATH],
[specify an absolute path to python executable when automatic version check (incorrectly) fails]),
@@ -292,8 +313,16 @@ AC_SUBST(COMMON_PYTHON_PATH)
if test -x ${PYTHON}-config; then
PYTHON_INCLUDES=`${PYTHON}-config --includes`
- for flag in `${PYTHON}-config --ldflags`; do
- # add any '-L..." flags to PYTHON_LDFLAGS
+ # Add any '-L..." flags to PYTHON_LDFLAGS. We first make a copy of
+ # python-config --ldflags, removing any spaces and tabs
+ # between "-L" and its argument (some instances of python-config
+ # insert a space, which would confuse the code below).
+ # Notes: if -L isn't contained at all we can simply skip this process,
+ # so we only go through the flag if it's contained; also, protecting
+ # the output with [] seems necessary for environment to avoid getting
+ # an empty output accidentally.
+ python_config_ldflags=[`${PYTHON}-config --ldflags | sed -ne 's/\([ \t]*-L\)[ ]*\([^ \t]*[ \t]*\)/\1\2/pg'`]
+ for flag in $python_config_ldflags; do
flag=`echo $flag | sed -ne 's/^\(\-L.*\)$/\1/p'`
if test "X${flag}" != X; then
PYTHON_LDFLAGS="$PYTHON_LDFLAGS ${flag}"
@@ -1127,6 +1156,7 @@ AC_CONFIG_FILES([Makefile
compatcheck/Makefile
src/Makefile
src/bin/Makefile
+ src/bin/bind10/bind10
src/bin/bind10/Makefile
src/bin/bind10/tests/Makefile
src/bin/cmdctl/Makefile
@@ -1310,9 +1340,9 @@ AC_OUTPUT([doc/version.ent
src/bin/sysinfo/run_sysinfo.sh
src/bin/stats/stats.py
src/bin/stats/stats_httpd.py
- src/bin/bind10/bind10_src.py
+ src/bin/bind10/init.py
src/bin/bind10/run_bind10.sh
- src/bin/bind10/tests/bind10_test.py
+ src/bin/bind10/tests/init_test.py
src/bin/bindctl/run_bindctl.sh
src/bin/bindctl/bindctl_main.py
src/bin/bindctl/tests/bindctl_test
@@ -1376,6 +1406,7 @@ AC_OUTPUT([doc/version.ent
chmod +x src/bin/xfrin/run_b10-xfrin.sh
chmod +x src/bin/xfrout/run_b10-xfrout.sh
chmod +x src/bin/zonemgr/run_b10-zonemgr.sh
+ chmod +x src/bin/bind10/bind10
chmod +x src/bin/bind10/run_bind10.sh
chmod +x src/bin/cmdctl/tests/cmdctl_test
chmod +x src/bin/dbutil/run_dbutil.sh
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 7642220..3120280 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,6 +1,6 @@
SUBDIRS = guide
-EXTRA_DIST = version.ent.in
+EXTRA_DIST = version.ent.in differences.txt
devel:
mkdir -p html
diff --git a/doc/differences.txt b/doc/differences.txt
new file mode 100644
index 0000000..0491929
--- /dev/null
+++ b/doc/differences.txt
@@ -0,0 +1,32 @@
+Differences of BIND 10 to other software
+========================================
+
+BIND 9
+------
+
+TODO: There are definitely more differences than just this.
+
+* When an incoming zone transfer fails, for example because the
+ received zone doesn't contain an NS record, BIND 9 stops serving the
+ zone and returns SERVFAIL to queries for that zone. BIND 10 still
+ uses the previous version of the zone.
+
+* In-memory data source does not sort RDATA of each RRset (in the
+ DNSSEC order) while BIND 9 normally sorts them internally. The main
+ purpose of BIND 9's behavior is to make the ordering
+ predictable, but if the RDATA are rotated in DNS responses (which
+ BIND 9 also does by default) the predictability wouldn't be that
+ useful for the clients. So we skip the sorting in the BIND 10
+ implementation to simplify the code (and possibly make it
+ a bit more efficient).
+
+* If different RRs of the same RRset and their RRSIGs have different
+ TTLs when loaded into the in-memory data source, the lowest TTL
+ among all RRs (whether from the covered RRset or the RRSIGs) will
+ be used. BIND 9 shows an inconsistent policy on this point for
+ unknown reasons (sometimes the TTL of the first RR is used,
+ sometimes the latest one). We differ here firstly for consistency,
+ and because it seems more consistent with the spirit of RFC2181.
+ In any case, the administrator should make the TTLs the same,
+ especially if the zone is signed, as described in RFC4034 (and that
+ will normally be ensured by zone signing tools).
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 5384d14..0d1913f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -7,7 +7,7 @@
]>
<!--
- - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2013 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -347,7 +347,7 @@ share/
share/bind10/
auth.spec
b10-cmdctl.pem
- bob.spec
+ init.spec
passwd.csv
man/
var/
@@ -432,9 +432,9 @@ var/
run): <screen>$ <userinput>bindctl</userinput></screen>
(Login with the provided default username and password.)
<screen>
-> <userinput>config add Boss/components b10-auth</userinput>
-> <userinput>config set Boss/components/b10-auth/special auth</userinput>
-> <userinput>config set Boss/components/b10-auth/kind needed</userinput>
+> <userinput>config add Init/components b10-auth</userinput>
+> <userinput>config set Init/components/b10-auth/special auth</userinput>
+> <userinput>config set Init/components/b10-auth/kind needed</userinput>
> <userinput>config commit</userinput>
> <userinput>quit</userinput>
</screen>
@@ -741,7 +741,12 @@ as a dependency earlier -->
</varlistentry>
</variablelist>
-
+ <note>
+ <para>
+ For additional instructions concerning the building and installation of
+ BIND 10 DHCP, see <xref linkend="dhcp-install-configure"/>.
+ </para>
+ </note>
</para>
<!-- TODO: lcov -->
@@ -761,9 +766,7 @@ as a dependency earlier -->
dependencies.
</para>
- <note>
- <para>For notes on configuring and building DHCPv6 with MySQL see <xref linkend="dhcp6-install">.</xref></para>
- </note>
+
</section>
<section>
@@ -1267,10 +1270,10 @@ TODO
<screen><userinput><module> <command> <replaceable>[argument(s)]</replaceable></userinput></screen>
- For example, the Boss module has a 'shutdown' command to shut down
+ For example, the Init module has a 'shutdown' command to shut down
BIND 10, with an optional argument 'help':
- <screen>> <userinput>Boss shutdown help</userinput>
+ <screen>> <userinput>Init shutdown help</userinput>
Command shutdown (Shut down BIND 10)
help (Get help for command)
This command has no parameters
@@ -1293,12 +1296,12 @@ Available module names:
</screen>
When 'help' is used as a command to a module, it shows the supported commands for the module; for example:
- <screen>> <userinput>Boss help</userinput>
-Module Boss Master process
+ <screen>> <userinput>Init help</userinput>
+Module Init Master process
Available commands:
help Get help for module.
shutdown Shut down BIND 10
- ping Ping the boss process
+ ping Ping the Init process
show_processes
List the running BIND 10 processes
</screen>
@@ -1639,7 +1642,7 @@ Parameters:
to maps.
</simpara>
<simpara>
- For example, the <command>Boss/components</command>
+ For example, the <command>Init/components</command>
elements is a named set;
adding, showing, and then removing an element
can be done with the following three commands (note
@@ -1647,13 +1650,13 @@ Parameters:
'example_module'):
</simpara>
<simpara>
- <command>config add Boss/components example_module</command>
+ <command>config add Init/components example_module</command>
</simpara>
<simpara>
- <command>config show Boss/components/example_module</command>
+ <command>config show Init/components/example_module</command>
</simpara>
<simpara>
- <command>config remove Boss/components example_module</command>
+ <command>config remove Init/components example_module</command>
</simpara>
</listitem>
</varlistentry>
@@ -1701,21 +1704,21 @@ Parameters:
<screen>> <userinput>execute init_authoritative_server show</userinput>
!echo adding Authoritative server component
-config add /Boss/components b10-auth
-config set /Boss/components/b10-auth/kind needed
-config set /Boss/components/b10-auth/special auth
+config add /Init/components b10-auth
+config set /Init/components/b10-auth/kind needed
+config set /Init/components/b10-auth/special auth
!echo adding Xfrin component
-config add /Boss/components b10-xfrin
-config set /Boss/components/b10-xfrin/address Xfrin
-config set /Boss/components/b10-xfrin/kind dispensable
+config add /Init/components b10-xfrin
+config set /Init/components/b10-xfrin/address Xfrin
+config set /Init/components/b10-xfrin/kind dispensable
!echo adding Xfrout component
-config add /Boss/components b10-xfrout
-config set /Boss/components/b10-xfrout/address Xfrout
-config set /Boss/components/b10-xfrout/kind dispensable
+config add /Init/components b10-xfrout
+config set /Init/components/b10-xfrout/address Xfrout
+config set /Init/components/b10-xfrout/kind dispensable
!echo adding Zone Manager component
-config add /Boss/components b10-zonemgr
-config set /Boss/components/b10-zonemgr/address Zonemgr
-config set /Boss/components/b10-zonemgr/kind dispensable
+config add /Init/components b10-zonemgr
+config set /Init/components/b10-zonemgr/address Zonemgr
+config set /Init/components/b10-zonemgr/kind dispensable
!echo Components added. Please enter "config commit" to
!echo finalize initial setup and run the components.
</screen>
@@ -1763,7 +1766,7 @@ config set /Boss/components/b10-zonemgr/kind dispensable
<section id="bindctl_execute_notes">
<title>Notes on execute scripts</title>
Within scripts, you can add or remove modules with the normal
- configuration commands for <command>Boss/components</command>.
+ configuration commands for <command>Init/components</command>.
However, as module
configuration and commands do not show up until the module is
running, it is currently not possible to add a module and set
@@ -2084,7 +2087,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<para>
The BIND 10 suite may be shut down by stopping the
parent <command>bind10</command> process. This may be done
- by running the <userinput>Boss shutdown</userinput> command
+ by running the <userinput>Init shutdown</userinput> command
at the <command>bindctl</command> prompt.
</para>
</section>
@@ -2098,7 +2101,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
of the required <command>b10-sockcreator</command>,
<command>b10-msgq</command> and <command>b10-cfgmgr</command>
components.
- The configuration is in the <varname>Boss/components</varname>
+ The configuration is in the <varname>Init/components</varname>
section. Each element represents one component, which is
an abstraction of a process.
</para>
@@ -2106,10 +2109,10 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<para>
To add a process to the set, let's say the resolver (which
is not started by default), you would do this:
- <screen>> <userinput>config add Boss/components b10-resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
-> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+ <screen>> <userinput>config add Init/components b10-resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Init/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput></screen></para>
<para>
@@ -2139,7 +2142,7 @@ AND_MATCH := "ALL": [ RULE_RAW, RULE_RAW, ... ]
<row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative DNS server</entry></row>
<row><entry>b10-resolver</entry><entry>resolver</entry><entry>DNS resolver</entry></row>
<row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>Command control (remote control interface)</entry></row>
- <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+ <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in b10-init before the release -->
</tbody>
</tgroup>
</table>
@@ -2223,9 +2226,9 @@ address, but the usual ones don't." mean? -->
You might want to do that to gain more performance (each one uses only
single core). Just put multiple entries under different names, like
this, with the same config:
- <screen>> <userinput>config add Boss/components b10-resolver-2</userinput>
-> <userinput>config set Boss/components/b10-resolver-2/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver-2/kind needed</userinput>
+ <screen>> <userinput>config add Init/components b10-resolver-2</userinput>
+> <userinput>config set Init/components/b10-resolver-2/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver-2/kind needed</userinput>
> <userinput>config commit</userinput></screen>
</para>
<para>
@@ -2241,7 +2244,7 @@ address, but the usual ones don't." mean? -->
<para>
The running processes started by <command>bind10</command>
- may be listed by running <userinput>Boss show_processes</userinput>
+ may be listed by running <userinput>Init show_processes</userinput>
using <command>bindctl</command>.
</para>
@@ -2413,7 +2416,7 @@ can use various data source backends.
<simpara>Stop the authoritative DNS server.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the BIND 10 init process may restart this service
if configured.)
</simpara>
</listitem>
@@ -3017,9 +3020,9 @@ what is XfroutClient xfr_client??
It can be done by using the <command>bindctl</command>
utility. For example:
<screen>
-> <userinput>config add Boss/components b10-ddns</userinput>
-> <userinput>config set Boss/components/b10-ddns/address DDNS</userinput>
-> <userinput>config set Boss/components/b10-ddns/kind dispensable</userinput>
+> <userinput>config add Init/components b10-ddns</userinput>
+> <userinput>config set Init/components/b10-ddns/address DDNS</userinput>
+> <userinput>config set Init/components/b10-ddns/kind dispensable</userinput>
> <userinput>config commit</userinput>
</screen>
<note><simpara>
@@ -3210,10 +3213,10 @@ what is XfroutClient xfr_client??
<command>bindctl</command>, for example:
<screen>
-> <userinput>config add Boss/components b10-resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
-> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
-> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config add Init/components b10-resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Init/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Init/components/b10-resolver/priority 10</userinput>
> <userinput>config commit</userinput>
</screen>
@@ -3317,9 +3320,9 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
</chapter>
- <chapter id="dhcp4">
- <title>DHCPv4 Server</title>
- <para>Dynamic Host Configuration Protocol for IPv4 (DHCP or
+ <chapter id="dhcp">
+ <title>DHCP</title>
+ <para>The Dynamic Host Configuration Protocol for IPv4 (DHCP or
DHCPv4) and Dynamic Host Configuration Protocol for IPv6 (DHCPv6)
are protocols that allow one node (server) to provision
configuration parameters to many hosts and devices (clients). To
@@ -3327,57 +3330,113 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
be deployed that facilitate communication between servers and
clients. Even though principles of both DHCPv4 and DHCPv6 are
somewhat similar, these are two radically different
- protocols. BIND 10 offers server implementations for both DHCPv4
- and DHCPv6. This chapter is about DHCP for IPv4. For a description
- of the DHCPv6 server, see <xref linkend="dhcp6"/>.</para>
+ protocols. BIND 10 offers two server implementations, one for DHCPv4
+ and one for DHCPv6.</para>
+ <para>This chapter covers those parts of BIND 10 that are common to
+ both servers. DHCPv4-specific details are covered in <xref linkend="dhcp4"/>,
+ while those details specific to DHCPv6 are described in <xref linkend="dhcp6"/>.
+ </para>
- <para>The DHCPv4 server component is currently under intense
- development. You may want to check out <ulink
- url="http://bind10.isc.org/wiki/Kea">BIND 10 DHCP (Kea) wiki</ulink>
- and recent posts on <ulink
- url="https://lists.isc.org/mailman/listinfo/bind10-dev">BIND 10
- developers mailing list</ulink>.</para>
+ <section id="dhcp-install-configure">
+ <title>DHCP Database Installation and Configuration</title>
+ <para>
+ BIND 10 DHCP stores its leases in a lease database. The software has been written in
+ a way that makes it possible to choose which database product should be used to
+ store the lease information. At present, only support for MySQL is provided, and that support must
+ be explicitly included when BIND 10 is built. This section covers the building of
+ BIND 10 with MySQL and the creation of the lease database.
+ </para>
+ <section>
+ <title>Install MySQL</title>
+ <para>
+ Install MySQL according to the instructions for your system. The client development
+ libraries must be installed.
+ </para>
+ </section>
+ <section>
+ <title>Build and Install BIND 10</title>
+ <para>
+ Build and install BIND 10 as described in <xref linkend="installation"/>, with
+ the following modification: to enable the MySQL database code, at the
+ "configure" step (see <xref linkend="configure"/>), specify the location of the
+ MySQL configuration program "mysql_config" with the "--with-dhcp-mysql" switch,
+ i.e.
+ <screen><userinput>./configure [other-options] --with-dhcp-mysql</userinput></screen>
+ ...if MySQL was installed in the default location, or:
+ <screen><userinput>./configure [other-options] --with-dhcp-mysql=<replaceable>path-to-mysql_config</replaceable></userinput></screen>
+ ...if not.
+ </para>
+ </section>
+ <section id="dhcp-database-create">
+ <title>Create MySQL Database and BIND 10 User</title>
+ <para>
+ The next task is to create both the lease database and the user under which the servers will
+ access it. A number of steps are required:
+ </para>
+ <para>
+ 1. Log into MySQL as "root":
+ <screen>$ <userinput>mysql -u root -p</userinput>
+Enter password:<userinput/>
+ :<userinput/>
+mysql></screen>
+ </para>
+ <para>
+ 2. Create the database:
+ <screen>mysql> <userinput>CREATE DATABASE <replaceable>database-name</replaceable>;</userinput></screen>
+ ... <replaceable>database-name</replaceable> is the name you have chosen for the database.
+ </para>
+ <para>
+ 3. Create the database tables:
+ <screen>mysql> <userinput>CONNECT <replaceable>database-name</replaceable>;</userinput>
+mysql> <userinput>SOURCE <replaceable>path-to-bind10</replaceable>/share/bind10/dhcpdb_create.mysql</userinput></screen>
+ </para>
+ <para>
+ 4. Create the user under which BIND 10 will access the database (and give it a password), then grant it access to the database tables:
+ <screen>mysql> <userinput>CREATE USER '<replaceable>user-name</replaceable>'@'localhost' IDENTIFIED BY '<replaceable>password</replaceable>';</userinput>
+mysql> <userinput>GRANT ALL ON <replaceable>database-name</replaceable>.* TO '<replaceable>user-name</replaceable>'@'localhost';</userinput></screen>
+ </para>
+ <para>
+ 5. Exit MySQL:
+ <screen>mysql> <userinput>quit</userinput>
+Bye<userinput/>
+$</screen>
+ </para>
+ </section>
+ </section>
- <para>The DHCPv4 and DHCPv6 components in BIND 10 architecture are
- internally code named <quote>Kea</quote>.</para>
+ </chapter>
+
+ <chapter id="dhcp4">
+ <title>The DHCPv4 Server</title>
+
+ <section id="dhcp4-start-stop">
+ <title>Starting and Stopping the DHCPv4 Server</title>
- <note>
<para>
- As of January 2013, the DHCPv4 component is a work in progress.
- That means that while it is capable of performing DHCP configuration,
- it is not fully functional. The server is able to offer,
- assign, renew, release and reuse expired leases, but some of the
- options are not configurable yet. In particular Router option is hardcoded.
- This means that the server is not really usable in actual deployments
- yet. See <xref linkend="dhcp4-limit"/> for a detailed description.
+ <command>b10-dhcp4</command> is the BIND 10 DHCPv4 server and, like other
+ parts of BIND 10, is configured through the <command>bindctl</command>
+ program.
</para>
- </note>
-
- <section id="dhcp4-usage">
- <title>DHCPv4 Server Usage</title>
- <para>BIND 10 has provided the DHCPv4 server component since December
- 2011. It is current experimental implementation and is not fully functional
- yet. It is mature enough to conduct tests in lab environment, but it has
- significant limitations. See <xref linkend="dhcp4-limit"/> for
- details.
+ <para>
+ After starting BIND 10 and entering bindctl, the first step
+ in configuring the server is to add it to the list of running BIND 10 services.
+<screen>
+> <userinput>config add Init/components b10-dhcp4</userinput>
+> <userinput>config set Init/components/b10-dhcp4/kind dispensable</userinput>
+> <userinput>config commit</userinput>
+</screen>
</para>
-
<para>
- <command>b10-dhcp4</command> is a BIND 10 component and is being
- run under BIND 10 framework. To add a DHCPv4 process to the set of running
- BIND 10 services, you can use following commands in <command>bindctl</command>:
- <screen>> <userinput>config add Boss/components b10-dhcp4</userinput>
-> <userinput>config set Boss/components/b10-dhcp4/kind dispensable</userinput>
-> <userinput>config commit</userinput></screen></para>
-
- <para>
- To stop running <command>b10-dhcp4</command>, please use the
- following command:
- <screen>> <userinput>config remove Boss/components b10-dhcp4</userinput>
-> <userinput>config commit</userinput></screen></para>
+ To remove <command>b10-dhcp4</command> from the set of running services,
+ remove it from the list of Init components:
+<screen>
+> <userinput>config remove Init/components b10-dhcp4</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ </para>
<para>
- During start-up the server will detect available network interfaces
+ On start-up, the server will detect available network interfaces
and will attempt to open UDP sockets on all interfaces that
are up, running, are not loopback, and have IPv4 address
assigned.
@@ -3392,23 +3451,29 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
</section>
- <section id="dhcp4-config">
- <title>DHCPv4 Server Configuration</title>
+ <section id="dhcp4-configuration">
+ <title>Configuring the DHCPv4 Server</title>
<para>
Once the server is started, it can be configured. To view the
current configuration, use the following command in <command>bindctl</command>:
<screen>
> <userinput>config show Dhcp4</userinput></screen>
- When starting Dhcp4 daemon for the first time, the default configuration
+ When starting the DHCPv4 daemon for the first time, the default configuration
will be available. It will look similar to this:
- <screen>
+<screen>
> <userinput>config show Dhcp4</userinput>
-Dhcp4/interface/ list (default)
-Dhcp4/renew-timer 1000 integer (default)
-Dhcp4/rebind-timer 2000 integer (default)
-Dhcp4/preferred-lifetime 3000 integer (default)
-Dhcp4/valid-lifetime 4000 integer (default)
-Dhcp4/subnet4 [] list (default)</screen>
+Dhcp4/interface/ list (default)
+Dhcp4/renew-timer 1000 integer (default)
+Dhcp4/rebind-timer 2000 integer (default)
+Dhcp4/valid-lifetime 4000 integer (default)
+Dhcp4/option-data [] list (default)
+Dhcp4/lease-database/type "memfile" string (default)
+Dhcp4/lease-database/name "" string (default)
+Dhcp4/lease-database/user "" string (default)
+Dhcp4/lease-database/host "" string (default)
+Dhcp4/lease-database/password "" string (default)
+Dhcp4/subnet4 [] list (default)
+</screen>
</para>
<para>
@@ -3423,6 +3488,69 @@ Dhcp4/subnet4 [] list (default)</screen>
per-subnet basis.
</para>
+ <section>
+ <title>Database Configuration</title>
+ <para>
+ All leases issued by the server are stored in the lease database. Currently,
+ the only supported database is MySQL
+ <footnote>
+ <para>
+ The server comes with an in-memory database ("memfile") configured as the default
+ database. This is used for internal testing and is not supported. In addition,
+ it does not store lease information on disk: lease information will be lost if the
+ server is restarted.
+ </para>
+ </footnote>, and so the server must be configured to
+ access the correct database with the appropriate credentials.
+ </para>
+ <note>
+ <para>
+ Database access information must be configured for the DHCPv4 server, even if
+ it has already been configured for the DHCPv6 server. The servers store their
+ information independently, so each server can use a separate
+ database or both servers can use the same database.
+ </para>
+ </note>
+ <para>
+ Database configuration is controlled through the Dhcp4/lease-database parameters.
+ The type of the database must be set to MySQL (although the string entered is "mysql"):
+<screen>
+> <userinput>config set Dhcp4/lease-database/type "mysql"</userinput>
+</screen>
+ Next, the name of the database that is to hold the leases must be set: this is the
+ name used when the lease database was created (see <xref linkend="dhcp-database-create"/>).
+<screen>
+> <userinput>config set Dhcp4/lease-database/name "<replaceable>database-name</replaceable>"</userinput>
+</screen>
+ If the database is located on a different system to the DHCPv4 server, the
+ database host name must also be specified (although note that this configuration
+ may have a severe impact on server performance):
+<screen>
+> <userinput>config set Dhcp4/lease-database/host "<replaceable>remote-host-name</replaceable>"</userinput>
+</screen>
+ The usual state of affairs will be to have the database on the same machine as the
+ DHCPv4 server. In this case, set the value to the empty string (this is the default):
+<screen>
+> <userinput>config set Dhcp4/lease-database/host ""</userinput>
+</screen>
+ </para>
+ <para>
+ Finally, the credentials of the account under which the server will access the database
+ should be set:
+<screen>
+> <userinput>config set Dhcp4/lease-database/user "<replaceable>user-name</replaceable>"</userinput>
+> <userinput>config set Dhcp4/lease-database/password "<replaceable>password</replaceable>"</userinput>
+</screen>
+ If there is no password to the account, set the password to the empty string "". (This is also the default.)
+ </para>
+ <note>
+ <para>The password is echoed when entered and is stored in clear text in the BIND 10 configuration
+ database. Improved password security will be added in a future version of BIND 10 DHCP.</para>
+ </note>
+ </section>
+
+ <section id="dhcp4-address-config">
+ <title>Configuration of Address Pools</title>
<para>
The essential role of DHCPv4 server is address assignment. The server
has to be configured with at least one subnet and one pool of dynamic
@@ -3462,7 +3590,7 @@ Dhcp4/subnet4 [] list (default)</screen>
> <userinput>config set Dhcp4/subnet4[1]/pool [ "192.0.3.0/24" ]</userinput>
> <userinput>config commit</userinput></screen>
Arrays are counted from 0. subnet[0] refers to the subnet defined in the
- previous example. The <command>config add Dhcp4/subnet4</command> adds
+ previous example. The <command>config add Dhcp4/subnet4</command> command adds
another (second) subnet. It can be referred to as
<command>Dhcp4/subnet4[1]</command>. In this example, we allow server to
dynamically assign all addresses available in the whole subnet.
@@ -3474,76 +3602,540 @@ Dhcp4/subnet4 [] list (default)</screen>
address) and the last (typically broadcast address) address from that pool.
In the aforementioned example of pool 192.0.3.0/24, both 192.0.3.0 and
192.0.3.255 addresses may be assigned as well. This may be invalid in some
- network configurations. If you want to avoid this, please use min-max notation.
+ network configurations. If you want to avoid this, please use the "min-max" notation.
+ </para>
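As a small, hedged illustration of the "min-max" notation (the subnet index
and addresses here are only examples, not defaults), a pool that excludes the
network and broadcast addresses might be entered roughly like this:

    > config set Dhcp4/subnet4[1]/pool [ "192.0.3.1 - 192.0.3.254" ]
    > config commit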
+ </section>
+
+ <section id="dhcp4-std-options">
+ <title>Standard DHCPv4 options</title>
+ <para>
+ One of the major features of the DHCPv4 server is to provide configuration
+ options to clients. Although there are several options that require
+ special behavior, most options are sent by the server only if the client
+ explicitly requested them. The following example shows how to
+ configure DNS servers, which is one of the most frequently used
+ options. Options specified in this way are considered global and apply
+ to all configured subnets.
+
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.3.1, 192.0.3.2"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ </para>
+ <para>
+ The first line creates a new entry in the option-data table. It
+ contains information on all global options that the server is
+ supposed to configure in all subnets. The second line specifies the
+ option name. For a complete list of currently supported names,
+ see <xref linkend="dhcp4-std-options-list"/> below.
+ The third line specifies the option code, which must match one of the
+ values from that list. Line 4 specifies the option space, which must always
+ be set to "dhcp4" as these are standard DHCPv4 options. For
+ other option spaces, including custom option spaces, see <xref
+ linkend="dhcp4-option-spaces"/>. The fifth line specifies the format in
+ which the data will be entered: use of CSV (comma
+ separated values) is recommended. The sixth line gives the actual value to
+ be sent to clients. Data is specified as normal text, with
+ values separated by commas if more than one value is
+ allowed.
+ </para>
+
+ <para>
+ Options can also be configured as hexadecimal values. If csv-format is
+ set to false, option data must be specified as a hex string. The
+ following commands configure the domain-name-servers option for all
+ subnets with the following addresses: 192.0.3.1 and 192.0.3.2.
+ Note that csv-format is set to false.
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format false</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "C0 00 03 01 C0 00 03 02"</userinput>
+> <userinput>config commit</userinput>
+ </screen>
+ </para>
+
+ <para>
+ It is possible to override options on a per-subnet basis. If
+ clients connected to most of your subnets are expected to get the
+ same values of a given option, you should use global options: you
+ can then override specific values for a small number of subnets.
+ On the other hand, if you use different values in each subnet,
+ it does not make sense to specify global option values
+ (Dhcp4/option-data), rather you should set only subnet-specific values
+ (Dhcp4/subnet[X]/option-data[Y]).
+ </para>
+ <para>
+ The following commands override the global
+ DNS servers option for a particular subnet, setting a single DNS
+ server with address 192.0.2.3.
+ <screen>
+> <userinput>config add Dhcp4/subnet4[0]/option-data</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/name "domain-name-servers"</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/code 6</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/subnet4[0]/option-data[0]/data "192.0.2.3"</userinput>
+> <userinput>config commit</userinput></screen>
</para>
+ <note>
+ <para>In a future version of Kea, it will not be necessary to specify
+ the option code, space and csv-format fields as they will be set
+ automatically.</para>
+ </note>
+
<para>
- Note: Although configuration is now accepted, some parts of it is not internally used
- by they server yet. Address pools are used, but option definitons are not.
- The only way to alter some options (e.g. Router Option or DNS servers and Domain name)
- is to modify source code. To do so, please edit
- src/bin/dhcp6/dhcp4_srv.cc file, modify the following parameters and
- recompile:
+ Below is a list of the currently supported standard DHCPv4 options. The "Name" and "Code"
+ are the values that should be used in the name and code fields of the option-data
+ structures. "Type" designates the format of the data: the meanings of
+ the various types are given in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ Some options are designated as arrays, which means that more than one
+ value is allowed in such an option. For example, the option time-servers
+ allows the specification of more than one IPv4 address, allowing
+ clients to obtain the addresses of multiple NTP servers.
+ </para>
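As an illustrative sketch (the addresses and the option-data index are made
up), an array option such as time-servers could be configured with several
addresses using the same pattern as the domain-name-servers example above:

    > config add Dhcp4/option-data
    > config set Dhcp4/option-data[1]/name "time-servers"
    > config set Dhcp4/option-data[1]/code 4
    > config set Dhcp4/option-data[1]/space "dhcp4"
    > config set Dhcp4/option-data[1]/csv-format true
    > config set Dhcp4/option-data[1]/data "192.0.3.10, 192.0.3.11"
    > config commit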
+ <!-- @todo: describe record types -->
+
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp4-std-options-list">
+ <caption>List of standard DHCPv4 options</caption>
+ <thead>
+ <tr><th>Name</th><th>Code</th><th>Type</th><th>Array?</th></tr>
+ </thead>
+ <tbody>
+<tr><td>subnet-mask</td><td>1</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>time-offset</td><td>2</td><td>uint32</td><td>false</td></tr>
+<tr><td>routers</td><td>3</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>time-servers</td><td>4</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>name-servers</td><td>5</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>domain-name-servers</td><td>6</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>log-servers</td><td>7</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>cookie-servers</td><td>8</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>lpr-servers</td><td>9</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>impress-servers</td><td>10</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>resource-location-servers</td><td>11</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>host-name</td><td>12</td><td>string</td><td>false</td></tr>
+<tr><td>boot-size</td><td>13</td><td>uint16</td><td>false</td></tr>
+<tr><td>merit-dump</td><td>14</td><td>string</td><td>false</td></tr>
+<tr><td>domain-name</td><td>15</td><td>fqdn</td><td>false</td></tr>
+<tr><td>swap-server</td><td>16</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>root-path</td><td>17</td><td>string</td><td>false</td></tr>
+<tr><td>extensions-path</td><td>18</td><td>string</td><td>false</td></tr>
+<tr><td>ip-forwarding</td><td>19</td><td>boolean</td><td>false</td></tr>
+<tr><td>non-local-source-routing</td><td>20</td><td>boolean</td><td>false</td></tr>
+<tr><td>policy-filter</td><td>21</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>max-dgram-reassembly</td><td>22</td><td>uint16</td><td>false</td></tr>
+<tr><td>default-ip-ttl</td><td>23</td><td>uint8</td><td>false</td></tr>
+<tr><td>path-mtu-aging-timeout</td><td>24</td><td>uint32</td><td>false</td></tr>
+<tr><td>path-mtu-plateau-table</td><td>25</td><td>uint16</td><td>true</td></tr>
+<tr><td>interface-mtu</td><td>26</td><td>uint16</td><td>false</td></tr>
+<tr><td>all-subnets-local</td><td>27</td><td>boolean</td><td>false</td></tr>
+<tr><td>broadcast-address</td><td>28</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>perform-mask-discovery</td><td>29</td><td>boolean</td><td>false</td></tr>
+<tr><td>mask-supplier</td><td>30</td><td>boolean</td><td>false</td></tr>
+<tr><td>router-discovery</td><td>31</td><td>boolean</td><td>false</td></tr>
+<tr><td>router-solicitation-address</td><td>32</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>static-routes</td><td>33</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>trailer-encapsulation</td><td>34</td><td>boolean</td><td>false</td></tr>
+<tr><td>arp-cache-timeout</td><td>35</td><td>uint32</td><td>false</td></tr>
+<tr><td>ieee802-3-encapsulation</td><td>36</td><td>boolean</td><td>false</td></tr>
+<tr><td>default-tcp-ttl</td><td>37</td><td>uint8</td><td>false</td></tr>
+<tr><td>tcp-keepalive-internal</td><td>38</td><td>uint32</td><td>false</td></tr>
+<tr><td>tcp-keepalive-garbage</td><td>39</td><td>boolean</td><td>false</td></tr>
+<tr><td>nis-domain</td><td>40</td><td>string</td><td>false</td></tr>
+<tr><td>nis-servers</td><td>41</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>ntp-servers</td><td>42</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>vendor-encapsulated-options</td><td>43</td><td>empty</td><td>false</td></tr>
+<tr><td>netbios-name-servers</td><td>44</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>netbios-dd-server</td><td>45</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>netbios-node-type</td><td>46</td><td>uint8</td><td>false</td></tr>
+<tr><td>netbios-scope</td><td>47</td><td>string</td><td>false</td></tr>
+<tr><td>font-servers</td><td>48</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>x-display-manager</td><td>49</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>dhcp-requested-address</td><td>50</td><td>ipv4-address</td><td>false</td></tr>
+<!-- Lease time should not be configured by a user.
+<tr><td>dhcp-lease-time</td><td>51</td><td>uint32</td><td>false</td></tr>
+-->
+<tr><td>dhcp-option-overload</td><td>52</td><td>uint8</td><td>false</td></tr>
+<!-- Message Type, Server Identifier and Parameter Request List should not be configured by a user.
+<tr><td>dhcp-message-type</td><td>53</td><td>uint8</td><td>false</td></tr>
+<tr><td>dhcp-server-identifier</td><td>54</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>dhcp-parameter-request-list</td><td>55</td><td>uint8</td><td>true</td></tr>
+-->
+<tr><td>dhcp-message</td><td>56</td><td>string</td><td>false</td></tr>
+<tr><td>dhcp-max-message-size</td><td>57</td><td>uint16</td><td>false</td></tr>
+<!-- Renewal and rebinding time should not be configured by a user.
+<tr><td>dhcp-renewal-time</td><td>58</td><td>uint32</td><td>false</td></tr>
+<tr><td>dhcp-rebinding-time</td><td>59</td><td>uint32</td><td>false</td></tr>
+-->
+<tr><td>vendor-class-identifier</td><td>60</td><td>binary</td><td>false</td></tr>
+<!-- Client identifier should not be configured by a user.
+<tr><td>dhcp-client-identifier</td><td>61</td><td>binary</td><td>false</td></tr>
+-->
+<tr><td>nwip-domain-name</td><td>62</td><td>string</td><td>false</td></tr>
+<tr><td>nwip-suboptions</td><td>63</td><td>binary</td><td>false</td></tr>
+<tr><td>user-class</td><td>77</td><td>binary</td><td>false</td></tr>
+<tr><td>fqdn</td><td>81</td><td>record</td><td>false</td></tr>
+<tr><td>dhcp-agent-options</td><td>82</td><td>empty</td><td>false</td></tr>
+<tr><td>authenticate</td><td>90</td><td>binary</td><td>false</td></tr>
+<tr><td>client-last-transaction-time</td><td>91</td><td>uint32</td><td>false</td></tr>
+<tr><td>associated-ip</td><td>92</td><td>ipv4-address</td><td>true</td></tr>
+<tr><td>subnet-selection</td><td>118</td><td>ipv4-address</td><td>false</td></tr>
+<tr><td>domain-search</td><td>119</td><td>binary</td><td>false</td></tr>
+<tr><td>vivco-suboptions</td><td>124</td><td>binary</td><td>false</td></tr>
+<tr><td>vivso-suboptions</td><td>125</td><td>binary</td><td>false</td></tr>
+ </tbody>
+ </table>
+ </para>
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp-types">
+ <caption>List of standard DHCP option types</caption>
+ <thead>
+ <tr><th>Name</th><th>Meaning</th></tr>
+ </thead>
+ <tbody>
+ <tr><td>binary</td><td>An arbitrary string of bytes, specified as a set of hexadecimal digits.</td></tr>
+ <tr><td>boolean</td><td>Boolean value with allowed values true or false</td></tr>
+ <tr><td>empty</td><td>No value, data is carried in suboptions</td></tr>
+ <tr><td>fqdn</td><td>Fully qualified domain name (e.g. www.example.com)</td></tr>
+ <tr><td>ipv4-address</td><td>IPv4 address in the usual dotted-decimal notation (e.g. 192.0.2.1)</td></tr>
+ <tr><td>ipv6-address</td><td>IPv6 address in the usual colon notation (e.g. 2001:db8::1)</td></tr>
+ <tr><td>record</td><td>Structured data that may comprise any types (except "record" and "empty")</td></tr>
+ <tr><td>string</td><td>Any text</td></tr>
+ <tr><td>uint8</td><td>8 bit unsigned integer with allowed values 0 to 255</td></tr>
+ <tr><td>uint16</td><td>16 bit unsigned integer with allowed values 0 to 65535</td></tr>
+ <tr><td>uint32</td><td>32 bit unsigned integer with allowed values 0 to 4294967295</td></tr>
+ </tbody>
+ </table>
+ </para>
+ </section>
+
+ <section id="dhcp4-custom-options">
+ <title>Custom DHCPv4 options</title>
+ <para>It is also possible to define options other than the standard ones.
+ Assume that we want to define a new DHCPv4 option called "foo" which will have
+ code 222 and will convey a single unsigned 32 bit integer value. We can define
+ such an option by using the following commands:
+ <screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 222</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "uint32"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput></screen>
+ The "false" value of the "array" parameter determines that the option
+ does NOT comprise an array of "uint32" values but rather a single value.
+ Two other parameters have been left blank: "record-types" and "encapsulate".
+ The former specifies the comma separated list of option data fields if the
+ option comprises a record of data fields. The "record-fields" value should
+ be non-empty if the "type" is set to "record". Otherwise it must be left
+ blank. The latter parameter specifies the name of the option space being
+ encapsulated by the particular option. If the particular option does not
+ encapsulate any option space it should be left blank.
+ Note that the above set of comments define the format of the new option and do not
+ set its values.
+ </para>
+ <note>
+ <para>
+ In the current release the default values are not propagated to the
+ parser when the new configuration is being set. Therefore, all
+ parameters must be specified at all times, even if their values are
+ left blank.
+ </para>
+ </note>
+
+ <para>Once the new option format is defined, its value is set
+ in the same way as for a standard option. For example, the following
+ commands set a global value that applies to all subnets.
<screen>
-const std::string HARDCODED_GATEWAY = "192.0.2.1";
-const std::string HARDCODED_DNS_SERVER = "192.0.2.2";
-const std::string HARDCODED_DOMAIN_NAME = "isc.example.com";</screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 222</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
- Lease database and configuration support is planned for end of 2012.
+ <para>New options can take more complex forms than simple use of
+ primitives (uint8, string, ipv4-address etc): it is possible to
+ define an option comprising a number of existing primitives.
</para>
+ <para>Assume we
+ want to define a new option that will consist of an IPv4
+ address, followed by an unsigned 16 bit integer, followed by a text
+ string. Such an option could be defined in the following way:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "bar"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 223</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types "ipv4-address, uint16, string"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+</screen>
+ The "type" is set to "record" to indicate that the option contains
+ multiple values of different types. These types are given as a comma-separated
+ list in the "record-types" field and should be those listed in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ The values of the option are set as follows:
+<screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "bar"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 223</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.100, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+ "csv-format" is set "true" to indicate that the "data" field comprises a command-separated
+ list of values. The values in the "data" must correspond to the types set in
+ the "record-types" field of the option definition.
</section>
+ <section id="dhcp4-vendor-opts">
+ <title>DHCPv4 vendor specific options</title>
+ <para>
+ Currently there are three option spaces defined: dhcp4 (to
+ be used in the DHCPv4 daemon) and dhcp6 (for the DHCPv6 daemon); there
+ is also vendor-encapsulated-options-space, which is empty by default, but options
+ can be defined in it. Those options are called vendor-specific
+ information options. The following examples show how to define
+ an option "foo" with code 1 that consists of an IPv4 address, an
+ unsigned 16 bit integer and a string. The "foo" option is conveyed
+ in a vendor specific information option.
+ </para>
+ <para>
+ The first step is to define the format of the option:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "vendor-encapsulated-options-space"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types "ipv4-address, uint16, string"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ (Note that the option space is set to "vendor-encapsulated-options-space".)
+ Once the option format is defined, the next step is to define actual values
+ for that option:
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "vendor-encapsulated-options-space"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.3, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ We also set up a dummy value for vendor-encapsulated-options, the option that conveys our sub-option "foo".
+ This is required; otherwise the option will not be included in messages sent to the client.
+ <screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[1]/name "vendor-encapsulated-options"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/code 43</userinput>
+> <userinput>config set Dhcp4/option-data[1]/csv-format false</userinput>
+> <userinput>config set Dhcp4/option-data[1]/data ""</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <note>
+ <para>
+ With this version of BIND 10, the "vendor-encapsulated-options" option
+ must be specified in the configuration although it has no configurable
+ parameters. If it is not specified, the server will assume that it is
+ not configured and will not send it to a client. In the future there
+ will be no need to include this option in the configuration.
+ </para>
+ </note>
+
+ </section>
+
+ <section id="dhcp4-option-spaces">
+
+ <title>Nested DHCPv4 options (custom option spaces)</title>
+ <para>It is sometimes useful to define a completely new option
+ space. This is the case when a user creates a new option in the
+ standard option space ("dhcp4" or "dhcp6") and wants this option
+ to convey sub-options. Because they are in a separate space,
+ the sub-option codes have their own numbering scheme and may
+ overlap with the codes of standard options.
+ </para>
+ <para>Note that creation of a new option space when defining
+ sub-options for a standard option is not required, because it is
+ created by default if the standard option is meant to convey any
+ sub-options (see <xref linkend="dhcp4-vendor-opts"/>).
+ </para>
+ <para>
+ Assume that we want to have a DHCPv4 option called "container" with
+ code 222 that conveys two sub-options with codes 1 and 2.
+ First we need to define the new sub-options:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-def[0]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/type "ipv4-address"</userinput>
+> <userinput>config set Dhcp4/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/code 2</userinput>
+> <userinput>config set Dhcp4/option-def[1]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/type "string"</userinput>
+> <userinput>config set Dhcp4/option-def[1]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[1]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[1]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Note that we have defined the options to belong to a new option space
+ (in this case, "isc").
+ </para>
+ <para>
+ The next step is to define a regular DHCPv4 option with our desired
+ code and specify that it should include options from the new option space:
+<screen>
+> <userinput>config add Dhcp4/option-def</userinput>
+> <userinput>config set Dhcp4/option-def[2]/name "container"</userinput>
+> <userinput>config set Dhcp4/option-def[2]/code 222</userinput>
+> <userinput>config set Dhcp4/option-def[2]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-def[2]/type "empty"</userinput>
+> <userinput>config set Dhcp4/option-def[2]/array false</userinput>
+> <userinput>config set Dhcp4/option-def[2]/record-types ""</userinput>
+> <userinput>config set Dhcp4/option-def[2]/encapsulate "isc"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ The name of the option space in which the sub-options are defined
+ is set in the "encapsulate" field. The "type" field is set to "empty"
+ to indicate that this option does not carry any data other than
+ sub-options.
+ </para>
+ <para>
+ Finally, we can set values for the new options:
+<screen>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp4/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[0]/data "192.0.2.3"</userinput>
+> <userinput>config commit</userinput>
+<userinput></userinput>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/space "isc"</userinput>
+> <userinput>config set Dhcp4/option-data[1]/code 2</userinput>
+> <userinput>config set Dhcp4/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[1]/data "Hello world"</userinput>
+> <userinput>config commit</userinput>
+<userinput></userinput>
+> <userinput>config add Dhcp4/option-data</userinput>
+> <userinput>config set Dhcp4/option-data[2]/name "container"</userinput>
+> <userinput>config set Dhcp4/option-data[2]/space "dhcp4"</userinput>
+> <userinput>config set Dhcp4/option-data[2]/code 222</userinput>
+> <userinput>config set Dhcp4/option-data[2]/csv-format true</userinput>
+> <userinput>config set Dhcp4/option-data[2]/data ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Even though the "container" option does not carry any data except
+ sub-options, the "data" field must be explictly set to an empty value.
+ This is required because in the current version of BIND 10 DHCP, the
+ default configuration values are not propagated to the configuration parsers:
+ if the "data" is not set the parser will assume that this
+ parameter is not specified and an error will be reported.
+ </para>
+ <para>Note that it is possible to create an option which carries some data
+ in addition to the sub-options defined in the encapsulated option space. For example,
+ if the "container" option from the previous example was required to carry an uint16
+ value as well as the sub-options, the "type" value would have to be set to "uint16" in
+ the option definition. (Such an option would then have the following
+ data structure: DHCP header, uint16 value, sub-options.) The value specified
+ with the "data" parameter - which should be a valid integer enclosed in quotes,
+ e.g. "123" - would then be assigned to the uint16 field in the "container" option.
+ </para>
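A hedged sketch of that variant, building on the "container" example above
(the uint16 value 123 is purely illustrative): the existing definition's type
would be changed and a value supplied in the data field, roughly:

    > config set Dhcp4/option-def[2]/type "uint16"
    > config set Dhcp4/option-data[2]/data "123"
    > config commit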
+ </section>
+ </section>
<section id="dhcp4-serverid">
<title>Server Identifier in DHCPv4</title>
- <para>The DHCPv4 protocol uses a "server identifier" for clients to be able
- to discriminate between several servers present on the same link: this
- value is an IPv4 address of the server. When started for the first time,
- the DHCPv4 server will choose one of its IPv4 addresses as its server-id,
- and store the chosen value to a file. (The file is named b10-dhcp4-serverid and is
- stored in the "local state directory". This is set during installation
- when "configure" is run, and can be changed by using "--localstatedir"
- on the "configure" command line.) That file will be read by the server
- and the contained value used whenever the server is subsequently started.
+ <para>
+ The DHCPv4 protocol uses a "server identifier" for clients to be able
+ to discriminate between several servers present on the same link: this
+ value is an IPv4 address of the server. When started for the first time,
+ the DHCPv4 server will choose one of its IPv4 addresses as its server-id,
+ and store the chosen value to a file. That file will be read by the server
+ and the contained value used whenever the server is subsequently started.
</para>
<para>
- It is unlikely that this parameter needs to be changed. If such a need
- arises, please stop the server, edit the file and restart the server.
- It is a text file that should contain an IPv4 address. Spaces are
- ignored. No extra characters are allowed in this file.
+ It is unlikely that this parameter should ever need to be changed.
+ However, if such a need arises, stop the server, edit the file and restart
+ the server. (The file is named b10-dhcp4-serverid and by default is
+ stored in the "var" subdirectory of the directory in which BIND 10 is installed.
+ This can be changed when BIND 10 is built by using "--localstatedir"
+ on the "configure" command line.) The file is a text file that should
+ contain an IPv4 address. Spaces are ignored, and no extra characters are allowed
+ in this file.
</para>
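For example, assuming a default installation prefix of /usr/local (the exact
path is an assumption; see the "var" subdirectory note above), the stored
server-id could be inspected and changed with ordinary shell tools while the
server is stopped:

    $ cat /usr/local/var/bind10/b10-dhcp4-serverid
    192.0.2.1
    $ echo "192.0.2.5" > /usr/local/var/bind10/b10-dhcp4-serverid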
</section>
<section id="dhcp4-std">
- <title>Supported standards</title>
+ <title>Supported Standards</title>
<para>The following standards and draft standards are currently
supported:</para>
<itemizedlist>
<listitem>
- <simpara>RFC2131: Supported messages are DISCOVER, OFFER,
- REQUEST, ACK, NAK, RELEASE.</simpara>
+ <simpara><ulink url="http://tools.ietf.org/html/rfc2131">RFC 2131</ulink>: Supported messages are DISCOVER, OFFER,
+ REQUEST, RELEASE, ACK, and NAK.</simpara>
</listitem>
<listitem>
- <simpara>RFC2132: Supported options are: PAD (0),
+ <simpara><ulink url="http://tools.ietf.org/html/rfc2132">RFC 2132</ulink>: Supported options are: PAD (0),
END(255), Message Type(53), DHCP Server Identifier (54),
Domain Name (15), DNS Servers (6), IP Address Lease Time
(51), Subnet mask (1), and Routers (3).</simpara>
</listitem>
- <listitem>
- <simpara>RFC6842: Server responses include client-id option
- if client sent it in its message.</simpara>
- </listitem>
</itemizedlist>
</section>
<section id="dhcp4-limit">
<title>DHCPv4 Server Limitations</title>
<para>These are the current limitations of the DHCPv4 server
- software. Most of them are reflections of the early stage of
+ software. Most of them are reflections of the current stage of
development and should be treated as <quote>not implemented
yet</quote>, rather than actual limitations.</para>
<itemizedlist>
<listitem>
- <simpara>During initial IPv4 node configuration, the
+ <para>
+ On startup, the DHCPv4 server does not get the full configuration from
+ BIND 10. To remedy this, after starting BIND 10, modify any parameter
+ and commit the changes, e.g.
+ <screen>
+> <userinput>config show Dhcp4/renew-timer</userinput>
+Dhcp4/renew-timer 1000 integer (default)
+> <userinput>config set Dhcp4/renew-timer 1001</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+ </listitem>
+ <listitem>
+ <simpara>During the initial IPv4 node configuration, the
server is expected to send packets to a node that does not
have IPv4 address assigned yet. The server requires
certain tricks (or hacks) to transmit such packets. This
@@ -3551,44 +4143,36 @@ const std::string HARDCODED_DOMAIN_NAME = "isc.example.com";</screen>
relayed traffic only (that is, normal point to point
communication).</simpara>
</listitem>
+
<listitem>
<simpara>Upon start, the server will open sockets on all
interfaces that are not loopback, are up and running and
have IPv4 address.</simpara>
</listitem>
+
<listitem>
- <simpara>PRL (Parameter Request List, a list of options
- requested by a client) is currently ignored and server
- assigns DNS SERVER and DOMAIN NAME options.</simpara>
- </listitem>
- <listitem>
- <simpara><command>b10-dhcp4</command> does not support
- BOOTP. That is a design choice. This limitation is
+ <simpara>The DHCPv4 server does not support
+ BOOTP. That is a design choice and the limitation is
permanent. If you have legacy nodes that can't use DHCP and
- require BOOTP support, please use the latest version of ISC DHCP
- via <ulink url="http://www.isc.org/software/dhcp"/>.</simpara>
+ require BOOTP support, please use the latest version of ISC DHCP,
+ available from <ulink url="http://www.isc.org/software/dhcp"/>.</simpara>
</listitem>
<listitem>
<simpara>Interface detection is currently working on Linux
only. See <xref linkend="iface-detect"/> for details.</simpara>
</listitem>
<listitem>
- <simpara><command>b10-dhcp4</command> does not verify that
- assigned address is unused. According to RFC2131, the
- allocating server should verify that address is no used by
+ <simpara>The DHCPv4 server does not verify that the
+ assigned address is unused. According to <ulink url="http://tools.ietf.org/html/rfc2131">RFC 2131</ulink>, the
+ allocating server should verify that the address is not in use by
sending ICMP echo request.</simpara>
</listitem>
<listitem>
- <simpara>Address rebinding (REQUEST/Rebinding), confirmation
- (CONFIRM) and duplication report (DECLINE) are not supported
- yet.</simpara>
+ <simpara>Address rebinding (REBIND) and duplication report (DECLINE)
+ are not supported yet.</simpara>
</listitem>
<listitem>
- <simpara>DNS Update is not supported yet.</simpara>
- </listitem>
- <listitem>
- <simpara>-v (verbose) command line option is currently
- the default, and cannot be disabled.</simpara>
+ <simpara>DNS Update is not yet supported.</simpara>
</listitem>
</itemizedlist>
</section>
@@ -3596,139 +4180,46 @@ const std::string HARDCODED_DOMAIN_NAME = "isc.example.com";</screen>
</chapter>
<chapter id="dhcp6">
- <title>DHCPv6 Server</title>
- <para>The Dynamic Host Configuration Protocol for IPv6 (DHCPv6) is
- specified in RFC3315. BIND 10 provides a DHCPv6 server implementation
- that is described in this chapter. For a description of the DHCPv4
- server implementation, see <xref linkend="dhcp4"/>.
- </para>
+ <title>The DHCPv6 Server</title>
- <para>The DHCPv6 server component is currently under intense
- development. You may want to check out <ulink
- url="http://bind10.isc.org/wiki/Kea">BIND 10 DHCP (Kea) wiki</ulink>
- and recent posts on <ulink
- url="https://lists.isc.org/mailman/listinfo/bind10-dev">BIND 10
- developers mailing list</ulink>.</para>
+ <section id="dhcp6-start-stop">
+ <title>Starting and Stopping the DHCPv6 Server</title>
- <note>
<para>
- As of November 2012, the DHCPv6 component is partially functioning,
- having the following capabilities:
+ <command>b10-dhcp6</command> is the BIND 10 DHCPv6 server and, like other
+ parts of BIND 10, is configured through the <command>bindctl</command>
+ program.
</para>
- <itemizedlist>
- <listitem>
- <simpara>DHCPv6 server able to allocate leases (but not renew them).</simpara>
- </listitem>
- <listitem>
- <simpara>Some configuration available through the BIND 10 configuration mechanism.</simpara>
- </listitem>
- <listitem>
- <simpara>Lease storage in a MySQL database.</simpara>
- </listitem>
- </itemizedlist>
- </note>
-
- <section id="dhcp6-install">
- <title>DHCPv6 Server Build and Installation</title>
<para>
- DHCPv6 is part of the BIND 10 suite of programs and is built as part of
- the build of BIND 10. With the use of MySQL, some additional
- installation steps are needed:
+ After starting BIND 10 and starting <command>bindctl</command>, the first step
+ in configuring the server is to add <command>b10-dhcp6</command> to the list of running BIND 10 services.
+<screen>
+> <userinput>config add Init/components b10-dhcp6</userinput>
+> <userinput>config set Init/components/b10-dhcp6/kind dispensable</userinput>
+> <userinput>config commit</userinput>
+</screen>
</para>
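+ <para>
+ To confirm that the component has been added, the Init/components list can
+ be displayed (the exact output will depend on which other components are
+ configured):
+<screen>
+> <userinput>config show Init/components</userinput>
+</screen>
+ </para>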
- <section>
- <title>Install MySQL</title>
- <para>
- Install MySQL according to the instructions for your system. The client development
- libraries must be installed.
- </para>
- </section>
- <section>
- <title>Build and Install BIND 10</title>
- <para>
- Build and install BIND 10 as described in <xref linkend="installation"/>, with
- the following modification: to enable the MySQL database code, the
- "configure" step (see <xref linkend="configure"/>), specify the location of the
- MySQL configuration program "mysql_config" with the "--with-mysql-config" switch,
- i.e.
- <screen><userinput>./configure [other-options] --with-dhcp-mysql</userinput></screen>
- ...if MySQL was installed in the default location, or:
- <screen><userinput>./configure [other-options] --with-dhcp-mysql=<replaceable><path-to-mysql_config></replaceable></userinput></screen>
- ...if not.
- </para>
- </section>
- <section>
- <title>Create MySQL Database and BIND 10 User</title>
- <para>
- The next task is to create both the DHCPv6 lease database and the user under which the DHCPv6 server will
- access it. Although the intention is to have the name of the database and the user configurable,
- at the moment they are hard-coded as "kea", as is the associated password. ("kea" is an internal
- code name for BIND 10 DHCP.) There are a number of steps required:
- </para>
- <para>
- 1. Log into MySQL as "root":
- <screen>$ <userinput>mysql -u root -p</userinput>
-Enter password:<userinput/>
- :<userinput/>
-mysql></screen>
- </para>
- <para>
- 2. Create the database:
- <screen>mysql> <userinput>CREATE DATABASE kea;</userinput></screen>
- </para>
- <para>
- 3. Create the database tables:
- <screen>mysql> <userinput>CONNECT kea;</userinput>
-mysql> <userinput>SOURCE <replaceable><path-to-bind10></replaceable>/share/bind10/dhcpdb_create.mysql</userinput></screen>
- </para>
- <para>
- 4. Create the user under which BIND 10 will access the database and grant it access to the database tables:
- <screen>mysql> <userinput>CREATE USER 'kea'@'localhost' IDENTIFIED BY 'kea';</userinput>
-mysql> <userinput>GRANT ALL ON kea.* TO 'kea'@'localhost';</userinput></screen>
- </para>
- <para>
- 5. Exit MySQL:
- <screen>mysql> <userinput>quit</userinput>
-Bye<userinput/>
-$</screen>
- </para>
- </section>
- </section>
-
- <section id="dhcp6-usage">
- <title>DHCPv6 Server Usage</title>
-
<para>
- <command>b10-dhcp6</command> is a BIND 10 component and is being
- run under BIND 10 framework. To add a DHCPv6 process to the set of running
- BIND 10 services, you can use following commands in <command>bindctl</command>:
- <screen>> <userinput>config add Boss/components b10-dhcp6</userinput>
-> <userinput>config set Boss/components/b10-dhcp6/kind dispensable</userinput>
-> <userinput>config commit</userinput></screen>
+ To remove <command>b10-dhcp6</command> from the set of running services,
+ it is removed from the list of Init components:
+<screen>
+> <userinput>config remove Init/components b10-dhcp6</userinput>
+> <userinput>config commit</userinput>
+</screen>
</para>
- <para>
- To stop running <command>b10-dhcp6</command>, use the
- following command:
- <screen>> <userinput>config remove Boss/components b10-dhcp6</userinput>
-> <userinput>config commit</userinput></screen>
- </para>
<para>
During start-up the server will detect available network interfaces
and will attempt to open UDP sockets on all interfaces that
are up, running, are not loopback, are multicast-capable, and
- have IPv6 address assigned. It will then listen to incoming traffic. The
- currently supported client messages are SOLICIT and REQUEST. The server
- will respond to them with ADVERTISE and REPLY, respectively.
- </para>
- <para>
- Since the DHCPv6 server opens privileged ports, it requires root
- access. Make sure you run this daemon as root.
+ have an IPv6 address assigned. It will then listen for incoming traffic.
</para>
+
</section>
- <section id="dhcp6-config">
+ <section id="dhcp6-configuration">
<title>DHCPv6 Server Configuration</title>
<para>
Once the server has been started, it can be configured. To view the
@@ -3736,16 +4227,22 @@ $</screen>
<screen>> <userinput>config show Dhcp6</userinput></screen>
When starting the Dhcp6 daemon for the first time, the default configuration
will be available. It will look similar to this:
- <screen>
+<screen>
> <userinput>config show Dhcp6</userinput>
-Dhcp6/interface "eth0" string (default)
-Dhcp6/renew-timer 1000 integer (default)
-Dhcp6/rebind-timer 2000 integer (default)
-Dhcp6/preferred-lifetime 3000 integer (default)
-Dhcp6/valid-lifetime 4000 integer (default)
-Dhcp6/subnet6 [] list (default)</screen>
+Dhcp6/interface/ list (default)
+Dhcp6/renew-timer 1000 integer (default)
+Dhcp6/rebind-timer 2000 integer (default)
+Dhcp6/preferred-lifetime 3000 integer (default)
+Dhcp6/valid-lifetime 4000 integer (default)
+Dhcp6/option-data [] list (default)
+Dhcp6/lease-database/type "memfile" string (default)
+Dhcp6/lease-database/name "" string (default)
+Dhcp6/lease-database/user "" string (default)
+Dhcp6/lease-database/host "" string (default)
+Dhcp6/lease-database/password "" string (default)
+Dhcp6/subnet6/ list
+</screen>
</para>
-
<para>
To change one of the parameters, simply follow
the usual <command>bindctl</command> procedure. For example, to make the
@@ -3757,7 +4254,77 @@ Dhcp6/subnet6 [] list (default)</screen>
and apply to all defined subnets, unless they are overridden on a
per-subnet basis.
</para>
+ <note>
+ <para>
+ With this version of BIND 10, there are a number of known limitations
+ and problems in the DHCPv6 server. See <xref linkend="dhcp6-limit"/>.
+ </para>
+ </note>
+ <section>
+ <title>Database Configuration</title>
+ <para>
+ All leases issued by the server are stored in the lease database. Currently,
+ the only supported database is MySQL
+ <footnote>
+ <para>
+ The server comes with an in-memory database ("memfile") configured as the default
+ database. This is used for internal testing and is not supported. In addition,
+ it does not store lease information on disk: lease information will be lost if the
+ server is restarted.
+ </para>
+ </footnote>, and so the server must be configured to
+ access the correct database with the appropriate credentials.
+ </para>
+ <note>
+ <para>
+ Database access information must be configured for the DHCPv6 server, even if
+ it has already been configured for the DHCPv4 server. The servers store their
+ information independently, so each server can use a separate
+ database or both servers can use the same database.
+ </para>
+ </note>
+ <para>
+ Database configuration is controlled through the Dhcp6/lease-database parameters.
+ The type of the database must be set to MySQL (although the string entered is "mysql"):
+<screen>
+> <userinput>config set Dhcp6/lease-database/type "mysql"</userinput>
+</screen>
+ Next, the name of the database that is to hold the leases must be set: this is the
+ name used when the lease database was created (see <xref linkend="dhcp-database-create"/>).
+<screen>
+> <userinput>config set Dhcp6/lease-database/name "<replaceable>database-name</replaceable>"</userinput>
+</screen>
+ If the database is located on a different system to the DHCPv6 server, the
+ database host name must also be specified (although note that this configuration
+ may have a severe impact on server performance):
+<screen>
+> <userinput>config set Dhcp6/lease-database/host "<replaceable>remote-host-name</replaceable>"</userinput>
+</screen>
+ The usual state of affairs will be to have the database on the same machine as the
+ DHCPv6 server. In this case, set the value to the empty string (this is the default):
+<screen>
+> <userinput>config set Dhcp6/lease-database/host ""</userinput>
+</screen>
+ </para>
+ <para>
+ Finally, the credentials of the account under which the server will access the database
+ should be set:
+<screen>
+> <userinput>config set Dhcp6/lease-database/user "<replaceable>user-name</replaceable>"</userinput>
+> <userinput>config set Dhcp6/lease-database/password "<replaceable>password</replaceable>"</userinput>
+</screen>
+ If there is no password for the account, set the password to the empty string "". (This is also the default.)
+ </para>
+ <note>
+ <para>The password is echoed when entered and is stored in clear text in the BIND 10 configuration
+ database. Improved password security will be added in a future version of BIND 10 DHCP.</para>
+ </note>
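+ <para>
+ As a quick check, the values entered above can be reviewed at any time with
+ <command>config show</command>; bindctl lists the type, name, host, user
+ and password values currently set:
+<screen>
+> <userinput>config show Dhcp6/lease-database</userinput>
+</screen>
+ </para>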
+ </section>
+
+
+ <section>
+ <title>Subnet and Address Pool</title>
<para>
The essential role of a DHCPv6 server is address assignment. For this,
the server has to be configured with at least one subnet and one pool of dynamic
@@ -3797,7 +4364,7 @@ Dhcp6/subnet6 [] list (default)</screen>
> <userinput>config set Dhcp6/subnet6[1]/pool [ "2001:db8:beef::/48" ]</userinput>
> <userinput>config commit</userinput></screen>
Arrays are counted from 0. subnet[0] refers to the subnet defined in the
- previous example. The <command>config add Dhcp6/subnet6</command> adds
+ previous example. The <command>config add Dhcp6/subnet6</command> command adds
another (second) subnet. It can be referred to as
<command>Dhcp6/subnet6[1]</command>. In this example, we allow the server to
dynamically assign all addresses available in the whole subnet. Although
@@ -3810,90 +4377,492 @@ Dhcp6/subnet6 [] list (default)</screen>
a given pool, it will also be able to allocate the first address from that
pool (typically the network address). For example, for pool 2001:db8::/64 the
2001:db8:: address may be assigned as well. If you want to avoid this,
- please use min-max notation.
+ please use the "min-max" notation.
</para>
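+ <para>
+ As an illustrative sketch (assuming the pool parameter accepts a
+ "min - max" range in place of a prefix, and using arbitrary addresses),
+ such a pool could be entered as:
+<screen>
+> <userinput>config set Dhcp6/subnet6[0]/pool [ "2001:db8::1 - 2001:db8::ffff" ]</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ This keeps the network address 2001:db8:: itself out of the dynamic pool.
+ </para>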
+ </section>
+
+ <section id="dhcp6-std-options">
+ <title>Standard DHCPv6 options</title>
<para>
- Options can also be configured: the following commands configure
- the DNS-SERVERS option for all subnets with the following addresses:
- 2001:db8:1::1 and 2001:db8:1::2
+ One of the major features of the DHCPv6 server is to provide configuration
+ options to clients. Although there are several options that require
+ special behavior, most options are sent by the server only if the client
+ explicitly requests them. The following example shows how to
+ configure DNS servers, which is one of the most frequently used
+ options. Numbers in the first column are added for easier reference and
+ will not appear on screen. Options specified in this way are considered
+ global and apply to all configured subnets.
+
<screen>
+1. > <userinput>config add Dhcp6/option-data</userinput>
+2. > <userinput>config set Dhcp6/option-data[0]/name "dns-servers"</userinput>
+3. > <userinput>config set Dhcp6/option-data[0]/code 23</userinput>
+4. > <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+5. > <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+6. > <userinput>config set Dhcp6/option-data[0]/data "2001:db8::cafe, 2001:db8::babe"</userinput>
+7. > <userinput>config commit</userinput>
+</screen>
+ </para>
+ <para>
+ The first line creates a new entry in the option-data table. It
+ contains information on all global options that the server is
+ supposed to configure in all subnets. The second line specifies
+ the option name. For a complete list of currently supported names,
+ see <xref linkend="dhcp6-std-options-list"/> below.
+ The third line specifies the option code, which must match one of the
+ values from that list. Line 4 specifies the option space, which must
+ always be set to "dhcp6" as these are standard DHCPv6 options. For
+ other name spaces, including custom option spaces, see <xref
+ linkend="dhcp6-option-spaces"/>. The fifth line specifies the format in
+ which the data will be entered: use of CSV (comma
+ separated values) is recommended. The sixth line gives the actual value to
+ be sent to clients. Data is specified as normal text, with
+ values separated by commas if more than one value is
+ allowed.
+ </para>
+
+ <para>
+ Options can also be configured as hexadecimal values. If csv-format is
+ set to false, the option data must be specified as a string of hexadecimal
+ numbers. The
+ following commands configure the DNS-SERVERS option for all
+ subnets with the addresses 2001:db8:1::cafe and
+ 2001:db8:1::babe.
+ <screen>
> <userinput>config add Dhcp6/option-data</userinput>
> <userinput>config set Dhcp6/option-data[0]/name "dns-servers"</userinput>
> <userinput>config set Dhcp6/option-data[0]/code 23</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format false</userinput>
> <userinput>config set Dhcp6/option-data[0]/data "2001 0DB8 0001 0000 0000 0000</userinput>
- <userinput>0000 0001 2001 0DB8 0001 0000 0000 0000 0000 0002"</userinput>
+ <userinput>0000 CAFE 2001 0DB8 0001 0000 0000 0000 0000 BABE"</userinput>
> <userinput>config commit</userinput>
</screen>
(The value for the setting of the "data" element is split across two
- lines in this document for clarity: when entering the command, all the
- string should be entered on the same line.)
+ lines in this document for clarity: when entering the command, the
+ whole string should be entered on the same line.)
</para>
- <para>
- Currently the only way to set option data is to specify the
- data as a string of hexadecimal digits. It is planned to allow
- alternative ways of specifying the data as a comma-separated list,
- e.g. "2001:db8:1::1,2001:db8:1::2".
- </para>
- <para>
- As with global settings, it is also possible to override options on a
- per-subnet basis, e.g. the following commands override the global DNS
- servers option for a particular subnet, setting a single DNS server with
- address 2001:db8:1::3.
- <screen>
+
+ <para>
+ It is possible to override options on a per-subnet basis. If
+ clients connected to most of your subnets are expected to get the
+ same values of a given option, you should use global options: you
+ can then override specific values for a small number of subnets.
+ On the other hand, if you use different values in each subnet,
+ it does not make sense to specify global option values
+ (Dhcp6/option-data); rather, you should set only subnet-specific values
+ (Dhcp6/subnet6[X]/option-data[Y]).
+ </para>
+ <para>
+ The following commands override the global
+ DNS servers option for a particular subnet, setting a single DNS
+ server with address 2001:db8:1::3.
+ <screen>
> <userinput>config add Dhcp6/subnet6[0]/option-data</userinput>
> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/name "dns-servers"</userinput>
> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/code 23</userinput>
-> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/data "2001 0DB8 0001 0000</userinput>
- <userinput>0000 0000 0000 0003"</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/option-data[0]/data "2001:db8:1::3"</userinput>
> <userinput>config commit</userinput></screen>
- (As before, the setting of the "data" element has been split across two
- lines for clarity.)
+ </para>
+
+ <note>
+ <para>
+ In future versions of BIND 10 DHCP, it will not be necessary to specify
+ option code, space and csv-format fields, as those fields will be set
+ automatically.
</para>
- <note>
- <para>
- With this version of BIND 10, there are a number of known limitations
- and problems in the DHCPv6 server. See <xref linkend="dhcp6-limit"/>.
- </para>
- </note>
+ </note>
+
+
+ <para>
+ Below is a list of currently supported standard DHCPv6 options. The "Name" and "Code"
+ are the values that should be used as a name in the option-data
+ structures. "Type" designates the format of the data: the meanings of
+ the various types is given in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ Some options are designated as arrays, which means that more than one
+ value is allowed in such an option. For example, the option dns-servers
+ allows the specification of more than one IPv6 address, allowing
+ clients to obtain the addresses of multiple DNS servers.
+ </para>
+
+<!-- @todo: describe record types -->
+
+ <para>
+ <table border="1" cellpadding="5%" id="dhcp6-std-options-list">
+ <caption>List of standard DHCPv6 options</caption>
+ <thead>
+ <tr><th>Name</th><th>Code</th><th>Type</th><th>Array?</th></tr>
+ <tr></tr>
+ </thead>
+ <tbody>
+<!-- These options are used internally by the server and must not be configured by the administrator
+<tr><td>clientid</td><td>1</td><td>binary</td><td>false</td></tr>
+<tr><td>serverid</td><td>2</td><td>binary</td><td>false</td></tr>
+<tr><td>ia-na</td><td>3</td><td>record</td><td>false</td></tr>
+<tr><td>ia-ta</td><td>4</td><td>uint32</td><td>false</td></tr>
+<tr><td>iaaddr</td><td>5</td><td>record</td><td>false</td></tr>
+<tr><td>oro</td><td>6</td><td>uint16</td><td>true</td></tr> -->
+<tr><td>preference</td><td>7</td><td>uint8</td><td>false</td></tr>
+
+<!-- These options are used internally by the server and must not be configured by the administrator
+<tr><td>elapsed-time</td><td>8</td><td>uint16</td><td>false</td></tr>
+<tr><td>relay-msg</td><td>9</td><td>binary</td><td>false</td></tr>
+<tr><td>auth</td><td>11</td><td>binary</td><td>false</td></tr>
+<tr><td>unicast</td><td>12</td><td>ipv6-address</td><td>false</td></tr>
+<tr><td>status-code</td><td>13</td><td>record</td><td>false</td></tr>
+<tr><td>rapid-commit</td><td>14</td><td>empty</td><td>false</td></tr>
+<tr><td>user-class</td><td>15</td><td>binary</td><td>false</td></tr>
+<tr><td>vendor-class</td><td>16</td><td>record</td><td>false</td></tr>
+<tr><td>vendor-opts</td><td>17</td><td>uint32</td><td>false</td></tr>
+<tr><td>interface-id</td><td>18</td><td>binary</td><td>false</td></tr>
+<tr><td>reconf-msg</td><td>19</td><td>uint8</td><td>false</td></tr>
+<tr><td>reconf-accept</td><td>20</td><td>empty</td><td>false</td></tr> -->
+<tr><td>sip-server-dns</td><td>21</td><td>fqdn</td><td>true</td></tr>
+<tr><td>sip-server-addr</td><td>22</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>dns-servers</td><td>23</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>domain-search</td><td>24</td><td>fqdn</td><td>true</td></tr>
+<!-- <tr><td>ia-pd</td><td>25</td><td>record</td><td>false</td></tr> -->
+<!-- <tr><td>iaprefix</td><td>26</td><td>record</td><td>false</td></tr> -->
+<tr><td>nis-servers</td><td>27</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>nisp-servers</td><td>28</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>nis-domain-name</td><td>29</td><td>fqdn</td><td>true</td></tr>
+<tr><td>nisp-domain-name</td><td>30</td><td>fqdn</td><td>true</td></tr>
+<tr><td>sntp-servers</td><td>31</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>information-refresh-time</td><td>32</td><td>uint32</td><td>false</td></tr>
+<tr><td>bcmcs-server-dns</td><td>33</td><td>fqdn</td><td>true</td></tr>
+<tr><td>bcmcs-server-addr</td><td>34</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>geoconf-civic</td><td>36</td><td>record</td><td>false</td></tr>
+<tr><td>remote-id</td><td>37</td><td>record</td><td>false</td></tr>
+<tr><td>subscriber-id</td><td>38</td><td>binary</td><td>false</td></tr>
+<tr><td>client-fqdn</td><td>39</td><td>record</td><td>false</td></tr>
+<tr><td>pana-agent</td><td>40</td><td>ipv6-address</td><td>true</td></tr>
+<tr><td>new-posix-timezone</td><td>41</td><td>string</td><td>false</td></tr>
+<tr><td>new-tzdb-timezone</td><td>42</td><td>string</td><td>false</td></tr>
+<tr><td>ero</td><td>43</td><td>uint16</td><td>true</td></tr>
+<tr><td>lq-query</td><td>44</td><td>record</td><td>false</td></tr>
+<tr><td>client-data</td><td>45</td><td>empty</td><td>false</td></tr>
+<tr><td>clt-time</td><td>46</td><td>uint32</td><td>false</td></tr>
+<tr><td>lq-relay-data</td><td>47</td><td>record</td><td>false</td></tr>
+<tr><td>lq-client-link</td><td>48</td><td>ipv6-address</td><td>true</td></tr>
+ </tbody>
+ </table>
+ </para>
+ </section>
+
+ <section id="dhcp6-custom-options">
+ <title>Custom DHCPv6 options</title>
+ <para>It is also possible to define options other than the standard ones.
+ Assume that we want to define a new DHCPv6 option called "foo" which will have
+ code 100 and will convey a single unsigned 32 bit integer value. We can define
+ such an option by using the following commands:
+ <screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 100</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "uint32"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput></screen>
+ The "false" value of the "array" parameter determines that the option
+ does NOT comprise an array of "uint32" values but rather a single value.
+ Two other parameters have been left blank: "record-types" and "encapsulate".
+ The former specifies the comma-separated list of option data fields if the
+ option comprises a record of data fields. The "record-types" value should
+ be non-empty if the "type" is set to "record". Otherwise it must be left
+ blank. The latter parameter specifies the name of the option space being
+ encapsulated by the particular option. If the particular option does not
+ encapsulate any option space it should be left blank.
+ Note that the above set of commands defines the format of the new option
+ and does not set its values.
+ </para>
+ <para>Once the new option format is defined, its value is set
+ in the same way as for a standard option. For example, the following
+ commands set a global value that applies to all subnets.
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 100</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>New options can take more complex forms than simple use of
+ primitives (uint8, string, ipv6-address etc): it is possible to
+ define an option comprising a number of existing primitives.
+ </para>
+ <para>
+ Assume we
+ want to define a new option that will consist of an IPv6
+ address, followed by an unsigned 16 bit integer, followed by a text
+ string. Such an option could be defined in the following way:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "bar"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 101</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types "ipv6-address, uint16, string"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ The "type" is set to "record" to indicate that the option contains
+ multiple values of different types. These types are given as a comma-separated
+ list in the "record-types" field and should be those listed in <xref linkend="dhcp-types"/>.
+ </para>
+ <para>
+ The values of the option are set as follows:
+<screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "bar"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 101</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8:1::10, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+ "csv-format" is set "true" to indicate that the "data" field comprises a command-separated
+ list of values. The values in the "data" must correspond to the types set in
+ the "record-types" field of the option definition.
+ </section>
+
+ <section id="dhcp6-vendor-opts">
+ <title>DHCPv6 vendor specific options</title>
+ <para>
+ Currently there are three option spaces defined: dhcp4 (to be used
+ in the DHCPv4 daemon) and dhcp6 (for the DHCPv6 daemon); there is also
+ vendor-opts-space, which is empty by default, but options can be
+ defined in it. Those options are called vendor-specific information
+ options. The following examples show how to define an option "foo"
+ with code 1 that consists of an IPv6 address, an unsigned 16 bit integer
+ and a string. The "foo" option is conveyed in a vendor specific
+ information option. This option comprises a single uint32 value
+ that is set to "12345". The sub-option "foo" follows the data
+ field holding this value.
+ <screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "vendor-opts-space"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "record"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types "ipv6-address, uint16, string"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ (Note that the option space is set to "vendor-opts-space".)
+ Once the option format is defined, the next step is to define actual values
+ for that option:
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "foo"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "vendor-opts-space"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8:1::10, 123, Hello World"</userinput>
+> <userinput>config commit</userinput></screen>
+ We should also define a value for the vendor-opts option, which will convey our option foo.
+ <screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[1]/name "vendor-opts"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/code 17</userinput>
+> <userinput>config set Dhcp6/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[1]/data "12345"</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+ </section>
+
+ <section id="dhcp6-option-spaces">
+ <title>Nested DHCPv6 options (custom option spaces)</title>
+ <para>It is sometimes useful to define completely new option
+ spaces. This is useful if the user wants a new option to
+ convey sub-options that use a separate numbering scheme, for
+ example sub-options with codes 1 and 2. Those option codes
+ conflict with standard DHCPv6 options, so a separate option
+ space must be defined.
+ </para>
+ <para>Note that it is not required to create a new option space when
+ defining sub-options for a standard option because one is created by
+ default if the standard option is meant to convey
+ any sub-options (see <xref linkend="dhcp6-vendor-opts"/>).
+ </para>
+ <para>
+ Assume that we want to have a DHCPv6 option called "container"
+ with code 102 that conveys two sub-options with codes 1 and 2.
+ First we need to define the new sub-options:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-def[0]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/type "ipv6-address"</userinput>
+> <userinput>config set Dhcp6/option-def[0]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[0]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[0]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/code 2</userinput>
+> <userinput>config set Dhcp6/option-def[1]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/type "string"</userinput>
+> <userinput>config set Dhcp6/option-def[1]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[1]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[1]/encapsulate ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Note that we have defined the options to belong to a new option space
+ (in this case, "isc").
+ </para>
+ <para>
+The next step is to define a regular DHCPv6 option and specify that it
+should include options from the isc option space:
+<screen>
+> <userinput>config add Dhcp6/option-def</userinput>
+> <userinput>config set Dhcp6/option-def[2]/name "container"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/code 102</userinput>
+> <userinput>config set Dhcp6/option-def[2]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/type "empty"</userinput>
+> <userinput>config set Dhcp6/option-def[2]/array false</userinput>
+> <userinput>config set Dhcp6/option-def[2]/record-types ""</userinput>
+> <userinput>config set Dhcp6/option-def[2]/encapsulate "isc"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ The name of the option space in which the sub-options are defined
+ is set in the "encapsulate" field. The "type" field is set to "empty"
+ which imposes that this option does not carry any data other than
+ sub-options.
+ </para>
+ <para>
+ Finally, we can set values for the new options:
+<screen>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[0]/name "subopt1"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-data[0]/code 1</userinput>
+> <userinput>config set Dhcp6/option-data[0]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[0]/data "2001:db8::abcd"</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[1]/name "subopt2"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/space "isc"</userinput>
+> <userinput>config set Dhcp6/option-data[1]/code 2</userinput>
+> <userinput>config set Dhcp6/option-data[1]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[1]/data "Hello world"</userinput>
+> <userinput>config commit</userinput>
+> <userinput></userinput>
+> <userinput>config add Dhcp6/option-data</userinput>
+> <userinput>config set Dhcp6/option-data[2]/name "container"</userinput>
+> <userinput>config set Dhcp6/option-data[2]/space "dhcp6"</userinput>
+> <userinput>config set Dhcp6/option-data[2]/code 102</userinput>
+> <userinput>config set Dhcp6/option-data[2]/csv-format true</userinput>
+> <userinput>config set Dhcp6/option-data[2]/data ""</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ Even though the "container" option does not carry any data except
+ sub-options, the "data" field must be explicitly set to an empty value.
+ This is required because in the current version of BIND 10 DHCP, the
+ default configuration values are not propagated to the configuration parsers:
+ if "data" is not set, the parser will assume that this
+ parameter is not specified and an error will be reported.
+ </para>
+ <para>Note that it is possible to create an option which carries some data
+ in addition to the sub-options defined in the encapsulated option space. For example,
+ if the "container" option from the previous example was required to carry an uint16
+ value as well as the sub-options, the "type" value would have to be set to "uint16" in
+ the option definition. (Such an option would then have the following
+ data structure: DHCP header, uint16 value, sub-options.) The value specified
+ with the "data" parameter - which should be a valid integer enclosed in quotes,
+ e.g. "123" - would then be assigned to the uint16 field in the "container" option.
+ </para>
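+ <para>
+ As a sketch of that variant (reusing the names and index values from the
+ example above), only the "type" field of the definition and the "data"
+ field of the option value would change:
+<screen>
+> <userinput>config set Dhcp6/option-def[2]/type "uint16"</userinput>
+> <userinput>config set Dhcp6/option-data[2]/data "123"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ All other fields, including the "encapsulate" setting, remain as in the
+ previous example.
+ </para>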
</section>
+ <section id="dhcp6-config-subnets">
+ <title>Subnet Selection</title>
+ <para>
+ The DHCPv6 server may receive requests from local (connected
+ to the same subnet as the server) and remote (connecting via
+ relays) clients.
+ <note>
+ <para>
+ Currently relayed DHCPv6 traffic is not supported. The server will
+ only respond to local DHCPv6 requests (see <xref linkend="dhcp6-limit"/>).
+ </para>
+ </note>
+ As it may have many subnet configurations defined, it
+ must select the appropriate subnet for a given request. To do this, the server first
+ checks whether there is only one subnet defined and the source of the packet is
+ link-local. If this is the case, the server assumes that the only subnet
+ defined is local and that the client is indeed connected to it. This check
+ simplifies small deployments.
+ </para>
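+ <para>
+ In other words, a deployment with a single locally attached subnet needs no
+ extra configuration for subnet selection; a configuration along the following
+ lines is sufficient (the addresses are illustrative, and the subnet6 list is
+ assumed to be empty so the new subnet becomes subnet6[0]):
+<screen>
+> <userinput>config add Dhcp6/subnet6</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/subnet "2001:db8:1::/64"</userinput>
+> <userinput>config set Dhcp6/subnet6[0]/pool [ "2001:db8:1::/80" ]</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ </para>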
+ <para>
+ If there are two or more subnets defined, the server cannot assume
+ which of those (if any) subnets are local. Therefore, an optional
+ "interface" parameter is available within a subnet definition to designate that a given subnet
+ is local, i.e. reachable directly over the specified interface. For example,
+ a server that is intended to serve a local subnet over eth0 may be configured
+ as follows:
+<screen>
+> <userinput>config add Dhcp6/subnet6</userinput>
+> <userinput>config set Dhcp6/subnet6[1]/subnet "2001:db8:beef::/48"</userinput>
+> <userinput>config set Dhcp6/subnet6[1]/pool [ "2001:db8:beef::/48" ]</userinput>
+> <userinput>config set Dhcp6/subnet6[1]/interface "eth0"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+ </para>
+ </section>
+
+ </section>
+
<section id="dhcp6-serverid">
<title>Server Identifier in DHCPv6</title>
<para>The DHCPv6 protocol uses a "server identifier" (also known
as a DUID) for clients to be able to discriminate between several
servers present on the same link. There are several types of
- DUIDs defined, but RFC 3315 instructs servers to use DUID-LLT if
+ DUIDs defined, but <ulink url="http://tools.ietf.org/html/rfc3315">RFC 3315</ulink> instructs servers to use DUID-LLT if
possible. This format consists of a link-layer (MAC) address and a
timestamp. When started for the first time, the DHCPv6 server will
automatically generate such a DUID and store the chosen value to
- a file (The file is named b10-dhcp6-serverid and is stored in the
- "local state directory". This is set during installation when
- "configure" is run, and can be changed by using "--localstatedir"
- on the "configure" command line.) That file will be read by the server
+ a file. That file is read by the server
and the contained value used whenever the server is subsequently started.
</para>
<para>
- It is unlikely that this parameter needs to be changed. If such a need
- arises, please stop the server, edit the file and start the server
- again. It is a text file that contains double digit hexadecimal values
+ It is unlikely that this parameter should ever need to be changed.
+ However, if such a need arises, stop the server, edit the file and restart
+ the server. (The file is named b10-dhcp6-serverid and by default is
+ stored in the "var" subdirectory of the directory in which BIND 10 is installed.
+ This can be changed when BIND 10 is built by using "--localstatedir"
+ on the "configure" command line.) The file is a text file that contains
+ double-digit hexadecimal values
separated by colons. This format is similar to typical MAC address
format. Spaces are ignored. No extra characters are allowed in this
file.
</para>
+
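+ <para>
+ For illustration only (the actual value is generated from the local MAC
+ address and a timestamp, so it will differ on every machine), the contents
+ of the file might look like:
+<screen>
+00:01:00:01:17:ab:cd:ef:08:00:27:58:f1:e8
+</screen>
+ </para>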
</section>
<section id="dhcp6-std">
- <title>Supported DHCPv6 Standards</title>
+ <title>Supported Standards</title>
<para>The following standards and draft standards are currently
supported:</para>
<itemizedlist>
<listitem>
- <simpara>RFC3315: Supported messages are SOLICIT,
- ADVERTISE, REQUEST, and REPLY. Supported options are
- SERVER_ID, CLIENT_ID, IA_NA, and IAADDRESS.</simpara>
+ <simpara><ulink url="http://tools.ietf.org/html/rfc3315">RFC 3315</ulink>: Supported messages are SOLICIT,
+ ADVERTISE, REQUEST, RELEASE, RENEW, and REPLY.</simpara>
</listitem>
<listitem>
- <simpara>RFC3646: Supported option is DNS_SERVERS.</simpara>
+ <simpara><ulink url="http://tools.ietf.org/html/rfc3646">RFC 3646</ulink>: Supported option is DNS_SERVERS.</simpara>
</listitem>
</itemizedlist>
</section>
@@ -3905,20 +4874,8 @@ Dhcp6/subnet6 [] list (default)</screen>
software. Most of them are reflections of the early stage of
development and should be treated as <quote>not implemented
yet</quote>, rather than actual limitations.</para>
- <para>
<itemizedlist>
- <listitem>
- <para>The DHCPv6 server has only been tested on Debian
- operating systems. There are known problems with the
- handling of packets in CentOS and RHEL.</para>
- </listitem>
- <listitem>
- <para>Relayed traffic is not supported.</para>
- </listitem>
- <listitem>
- <para><command>b10-dhcp6</command> only supports
- a limited number of configuration options.</para>
- </listitem>
+
<listitem>
<para>
On startup, the DHCPv6 server does not get the full configuration from
@@ -3932,40 +4889,26 @@ Dhcp6/renew-timer 1000 integer (default)
</para>
</listitem>
<listitem>
- <para>Upon start, the server will open sockets on all
- interfaces that are not loopback, are up, running and are
- multicast capable and have IPv6 address. Support for
- multiple interfaces is not coded in reception routines yet,
- so if you are running this code on a machine that has many
- interfaces and <command>b10-dhcp6</command> happens to
- listen on wrong interface, the easiest way to work around
- this problem is to turn down other interfaces. This
- limitation will be fixed shortly.</para>
+ <simpara>Relayed traffic is not supported.</simpara>
</listitem>
<listitem>
- <para>ORO (Option Request Option, a list of options
- requested by a client) is currently unsupported.</para>
+ <simpara>Temporary addresses are not supported.</simpara>
</listitem>
<listitem>
- <para>Temporary addresses are not supported.</para>
+ <simpara>Prefix delegation is not supported.</simpara>
</listitem>
<listitem>
- <para>Prefix delegation is not supported.</para>
+ <simpara>Rebinding (REBIND), confirmation (CONFIRM),
+ and duplication report (DECLINE) are not yet supported.</simpara>
</listitem>
<listitem>
- <para>Address renewal (RENEW), rebinding (REBIND),
- confirmation (CONFIRM), duplication report (DECLINE) and
- release (RELEASE) are not supported.</para>
+ <simpara>DNS Update is not supported.</simpara>
</listitem>
<listitem>
- <para>DNS Update is not supported.</para>
- </listitem>
- <listitem>
- <para>Interface detection is currently working on Linux
- only. See <xref linkend="iface-detect"/> for details.</para>
+ <simpara>Interface detection is currently working on Linux
+ only. See <xref linkend="iface-detect"/> for details.</simpara>
</listitem>
</itemizedlist>
- </para>
</section>
</chapter>
@@ -3974,7 +4917,7 @@ Dhcp6/renew-timer 1000 integer (default)
<title>libdhcp++ library</title>
<para>
libdhcp++ is a common library written in C++ that handles
- many DHCP-related tasks, including
+ many DHCP-related tasks, including:
<itemizedlist>
<listitem>
<simpara>DHCPv4 and DHCPv6 packet parsing, manipulation and assembly</simpara>
@@ -3992,10 +4935,8 @@ Dhcp6/renew-timer 1000 integer (default)
</para>
<para>
- While this library is currently used by
- <command>b10-dhcp4</command> and <command>b10-dhcp6</command>
- only, it is designed to be a portable, universal library, useful for
- any kind of DHCP-related software.
+ While this library is currently used by BIND 10 DHCP, it is designed to
+ be a portable, universal library, useful for any kind of DHCP-related software.
</para>
<!-- TODO: point to doxygen docs -->
@@ -4052,7 +4993,7 @@ Dhcp6/renew-timer 1000 integer (default)
"queries.tcp": 1749,
"queries.udp": 867868
},
- "Boss": {
+ "Init": {
"boot_time": "2011-01-20T16:59:03Z"
},
"Stats": {
diff --git a/examples/configure.ac b/examples/configure.ac
index 37515d9..850e7ef 100644
--- a/examples/configure.ac
+++ b/examples/configure.ac
@@ -14,9 +14,10 @@ AC_LANG([C++])
# Checks for BIND 10 headers and libraries
AX_ISC_BIND10
-# We use -R, -rpath etc so the resulting program will be more likekly to
+# We use the -R option etc. so the resulting program will be more likely to
# "just work" by default. Embedding a specific library path is a controversial
-# practice, though; if you don't like it you can remove the following setting.
+# practice, though; if you don't like it you can remove the following setting,
+# or use the --disable-rpath option.
if test "x$BIND10_RPATH" != "x"; then
LDFLAGS="$LDFLAGS $BIND10_RPATH"
fi
diff --git a/examples/m4/ax_isc_rpath.m4 b/examples/m4/ax_isc_rpath.m4
index 91d9b8a..ee1e472 100644
--- a/examples/m4/ax_isc_rpath.m4
+++ b/examples/m4/ax_isc_rpath.m4
@@ -3,44 +3,54 @@ dnl
dnl @summary figure out whether and which "rpath" linker option is available
dnl
dnl This macro checks if the linker supports an option to embed a path
-dnl to a runtime library (often installed in an uncommon place), such as
-dnl gcc's -rpath option. If found, it sets the ISC_RPATH_FLAG variable to
+dnl to a runtime library (often installed in an uncommon place), such as the
+dnl commonly used -R option. If found, it sets the ISC_RPATH_FLAG variable to
dnl the found option flag. The main configure.ac can use it as follows:
dnl if test "x$ISC_RPATH_FLAG" != "x"; then
dnl LDFLAGS="$LDFLAGS ${ISC_RPATH_FLAG}/usr/local/lib/some_library"
dnl fi
+dnl
+dnl If you pass --disable-rpath to configure, ISC_RPATH_FLAG is not set
AC_DEFUN([AX_ISC_RPATH], [
-# We'll tweak both CXXFLAGS and CCFLAGS so this function will work whichever
-# language is used in the main script. Note also that it's not LDFLAGS;
-# technically this is a linker flag, but we've noticed $LDFLAGS can be placed
-# where the compiler could interpret it as a compiler option, leading to
-# subtle failure mode. So, in the check below using the compiler flag is
-# safer (in the actual Makefiles the flag should be set in LDFLAGS).
-CXXFLAGS_SAVED="$CXXFLAGS"
-CXXFLAGS="$CXXFLAGS -Wl,-R/usr/lib"
-CCFLAGS_SAVED="$CCFLAGS"
-CCFLAGS="$CCFLAGS -Wl,-R/usr/lib"
+AC_ARG_ENABLE(rpath,
+ [AC_HELP_STRING([--disable-rpath], [don't hardcode library path into binaries])],
+ rpath=$enableval, rpath=yes)
+
+if test x$rpath != xno; then
+ # We'll tweak both CXXFLAGS and CCFLAGS so this function will work
+ # whichever language is used in the main script. Note also that it's not
+ # LDFLAGS; technically this is a linker flag, but we've noticed $LDFLAGS
+ # can be placed where the compiler could interpret it as a compiler
+ # option, leading to subtle failure mode. So, in the check below using
+ # the compiler flag is safer (in the actual Makefiles the flag should be
+ # set in LDFLAGS).
+ CXXFLAGS_SAVED="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS -Wl,-R/usr/lib"
+ CCFLAGS_SAVED="$CCFLAGS"
+ CCFLAGS="$CCFLAGS -Wl,-R/usr/lib"
-# check -Wl,-R and -R rather than gcc specific -rpath to be as portable
-# as possible. -Wl,-R seems to be safer, so we try it first. In some cases
-# -R is not actually recognized but AC_TRY_LINK doesn't fail due to that.
-AC_MSG_CHECKING([whether -Wl,-R flag is available in linker])
-AC_TRY_LINK([],[],
- [ AC_MSG_RESULT(yes)
- ISC_RPATH_FLAG=-Wl,-R
- ],[ AC_MSG_RESULT(no)
- AC_MSG_CHECKING([whether -R flag is available in linker])
- CXXFLAGS="$CXXFLAGS_SAVED -R"
- CCFLAGS="$CCFLAGS_SAVED -R"
+ # check -Wl,-R and -R rather than gcc specific -rpath to be as portable
+ # as possible. -Wl,-R seems to be safer, so we try it first. In some
+ # cases -R is not actually recognized but AC_TRY_LINK doesn't fail due to
+ # that.
+ AC_MSG_CHECKING([whether -Wl,-R flag is available in linker])
+ AC_TRY_LINK([],[],
+ [ AC_MSG_RESULT(yes)
+ ISC_RPATH_FLAG=-Wl,-R
+ ],[ AC_MSG_RESULT(no)
+ AC_MSG_CHECKING([whether -R flag is available in linker])
+ CXXFLAGS="$CXXFLAGS_SAVED -R"
+ CCFLAGS="$CCFLAGS_SAVED -R"
AC_TRY_LINK([], [],
[ AC_MSG_RESULT([yes; note that -R is more sensitive about the position in option arguments])
ISC_RPATH_FLAG=-R
],[ AC_MSG_RESULT(no) ])
- ])
+ ])
-CXXFLAGS=$CXXFLAGS_SAVED
-CCFLAGS=$CCFLAGS_SAVED
+ CXXFLAGS=$CXXFLAGS_SAVED
+ CCFLAGS=$CCFLAGS_SAVED
+fi
])dnl AX_ISC_RPATH
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index e8592ac..de8325b 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -106,7 +106,7 @@ public:
rollbackAddresses_ = old;
}
virtual void commit() {
- rollbackAddresses_.release();
+ rollbackAddresses_.reset();
}
private:
AuthSrv& server_;
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index d93da51..77b20b1 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -14,7 +14,7 @@
$NAMESPACE isc::auth
-% AUTH_AXFR_ERROR error handling AXFR request: %1
+% AUTH_AXFR_PROBLEM error handling AXFR request: %1
This is a debug message produced by the authoritative server when it
has encountered an error processing an AXFR request. The message gives
the reason for the error, and the server will return a SERVFAIL code to
@@ -232,13 +232,13 @@ This is a debug message produced by the authoritative server when it receives
a NOTIFY packet but the XFRIN process is not running. The packet will be
dropped and nothing returned to the sender.
-% AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1
+% AUTH_PACKET_PARSE_FAILED unable to parse received DNS packet: %1
This is a debug message, generated by the authoritative server when an
attempt to parse a received DNS packet has failed due to something other
than a protocol error. The reason for the failure is given in the message;
the server will return a SERVFAIL error code to the sender.
-% AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2
+% AUTH_PACKET_PROTOCOL_FAILURE DNS packet protocol error: %1. Returning %2
This is a debug message, generated by the authoritative server when an
attempt to parse a received DNS packet has failed due to a protocol error.
The reason for the failure is given in the message, as is the error code
@@ -312,6 +312,9 @@ been created and is initializing. The AUTH_SERVER_STARTED message will be
output when initialization has successfully completed and the server starts
accepting queries.
+% AUTH_SERVER_EXITING exiting
+The authoritative server is exiting.
+
% AUTH_SERVER_FAILED server failed: %1
The authoritative server has encountered a fatal error and is terminating. The
reason for the failure is included in the message.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 26a8489..ca323e0 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -526,13 +526,13 @@ AuthSrv::processMessage(const IOMessage& io_message, Message& message,
// Parse the message.
message.fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PROTOCOL_FAILURE)
.arg(error.getRcode().toText()).arg(error.what());
makeErrorMessage(impl_->renderer_, message, buffer, error.getRcode());
impl_->resumeServer(server, message, stats_attrs, true);
return;
} catch (const Exception& ex) {
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_PACKET_PARSE_FAILED)
.arg(ex.what());
makeErrorMessage(impl_->renderer_, message, buffer, Rcode::SERVFAIL());
impl_->resumeServer(server, message, stats_attrs, true);
@@ -725,7 +725,7 @@ AuthSrvImpl::processXfrQuery(const IOMessage& io_message, Message& message,
xfrout_connected_ = false;
}
- LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_ERROR)
+ LOG_DEBUG(auth_logger, DBG_AUTH_DETAIL, AUTH_AXFR_PROBLEM)
.arg(err.what());
makeErrorMessage(renderer_, message, buffer, Rcode::SERVFAIL(),
tsig_context);
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 88f80e2..08a2fde 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -53,8 +53,8 @@
<para>The <command>b10-auth</command> daemon provides the BIND 10
authoritative DNS server.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -194,7 +194,7 @@
<command>shutdown</command> exits <command>b10-auth</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/auth/main.cc b/src/bin/auth/main.cc
index e90d199..dc03be2 100644
--- a/src/bin/auth/main.cc
+++ b/src/bin/auth/main.cc
@@ -44,6 +44,7 @@
#include <server_common/socket_request.h>
#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
#include <sys/types.h>
#include <sys/socket.h>
@@ -152,10 +153,11 @@ main(int argc, char* argv[]) {
int ret = 0;
// XXX: we should eventually pass io_service here.
- Session* cc_session = NULL;
- Session* xfrin_session = NULL;
+ boost::scoped_ptr<AuthSrv> auth_server_; // placeholder
+ boost::scoped_ptr<Session> cc_session;
+ boost::scoped_ptr<Session> xfrin_session;
bool xfrin_session_established = false; // XXX (see Trac #287)
- ModuleCCSession* config_session = NULL;
+ boost::scoped_ptr<ModuleCCSession> config_session;
XfroutClient xfrout_client(getXfroutSocketPath());
SocketSessionForwarder ddns_forwarder(getDDNSSocketPath());
try {
@@ -167,7 +169,8 @@ main(int argc, char* argv[]) {
specfile = string(AUTH_SPECFILE_LOCATION);
}
- auth_server = new AuthSrv(xfrout_client, ddns_forwarder);
+ auth_server_.reset(new AuthSrv(xfrout_client, ddns_forwarder));
+ auth_server = auth_server_.get();
LOG_INFO(auth_logger, AUTH_SERVER_CREATED);
SimpleCallback* checkin = auth_server->getCheckinProvider();
@@ -179,7 +182,7 @@ main(int argc, char* argv[]) {
auth_server->setDNSService(dns_service);
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_DNS_SERVICES_CREATED);
- cc_session = new Session(io_service.get_io_service());
+ cc_session.reset(new Session(io_service.get_io_service()));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_CREATED);
// Initialize the Socket Requestor
isc::server_common::initSocketRequestor(*cc_session, AUTH_NAME);
@@ -187,22 +190,22 @@ main(int argc, char* argv[]) {
// We delay starting listening to new commands/config just before we
// go into the main loop to avoid confusion due to mixture of
// synchronous and asynchronous operations (this would happen in
- // initial communication with the boss that takes place in
+ // initial communication with b10-init that takes place in
// updateConfig() for listen_on and in initializing TSIG keys below).
// Until then all operations on the CC session will take place
// synchronously.
- config_session = new ModuleCCSession(specfile, *cc_session,
- my_config_handler,
- my_command_handler, false);
+ config_session.reset(new ModuleCCSession(specfile, *cc_session,
+ my_config_handler,
+ my_command_handler, false));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_CONFIG_CHANNEL_ESTABLISHED);
- xfrin_session = new Session(io_service.get_io_service());
+ xfrin_session.reset(new Session(io_service.get_io_service()));
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_CREATED);
xfrin_session->establish(NULL);
xfrin_session_established = true;
LOG_DEBUG(auth_logger, DBG_AUTH_START, AUTH_XFRIN_CHANNEL_ESTABLISHED);
- auth_server->setXfrinSession(xfrin_session);
+ auth_server->setXfrinSession(xfrin_session.get());
// Configure the server. configureAuthServer() is expected to install
// all initial configurations, but as a short term workaround we
@@ -210,7 +213,7 @@ main(int argc, char* argv[]) {
// updateConfig().
// if server load configure failed, we won't exit, give user second
// chance to correct the configure.
- auth_server->setConfigSession(config_session);
+ auth_server->setConfigSession(config_session.get());
try {
configureAuthServer(*auth_server, config_session->getFullConfig());
auth_server->updateConfig(ElementPtr());
@@ -228,7 +231,7 @@ main(int argc, char* argv[]) {
config_session->addRemoteConfig("data_sources",
boost::bind(datasrcConfigHandler,
auth_server, &first_time,
- config_session,
+ config_session.get(),
_1, _2, _3),
false);
@@ -260,10 +263,7 @@ main(int argc, char* argv[]) {
config_session->removeRemoteConfig("data_sources");
}
- delete xfrin_session;
- delete config_session;
- delete cc_session;
- delete auth_server;
+ LOG_INFO(auth_logger, AUTH_SERVER_EXITING);
return (ret);
}
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 5b71565..a2a4117 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -101,8 +101,11 @@ Query::ResponseCreator::create(Message& response,
void
Query::addSOA(ZoneFinder& finder) {
- ZoneFinderContextPtr soa_ctx = finder.find(finder.getOrigin(),
- RRType::SOA(), dnssec_opt_);
+ // This method is always called in finding SOA for a negative response,
+ // so we specify the use of min(RRTTL, SOA MINTTL) as specified in
+ // Section 3 of RFC2308.
+ ZoneFinderContextPtr soa_ctx = finder.findAtOrigin(RRType::SOA(), true,
+ dnssec_opt_);
if (soa_ctx->code != ZoneFinder::SUCCESS) {
isc_throw(NoSOA, "There's no SOA record in zone " <<
finder.getOrigin().toText());
@@ -318,11 +321,9 @@ void
Query::addAuthAdditional(ZoneFinder& finder,
vector<ConstRRsetPtr>& additionals)
{
- const Name& origin = finder.getOrigin();
-
// Fill in authority and additional sections.
- ConstZoneFinderContextPtr ns_context = finder.find(origin, RRType::NS(),
- dnssec_opt_);
+ ConstZoneFinderContextPtr ns_context =
+ finder.findAtOrigin(RRType::NS(), false, dnssec_opt_);
// zone origin name should have NS records
if (ns_context->code != ZoneFinder::SUCCESS) {
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 830de0d..05c6cce 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -130,7 +130,7 @@ TEST_F(AuthConfigTest, invalidListenAddressConfig) {
isc::testutils::portconfig::invalidListenAddressConfig(server);
}
-// Try setting addresses trough config
+// Try setting addresses through config
TEST_F(AuthConfigTest, listenAddressConfig) {
isc::testutils::portconfig::listenAddressConfig(server);
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index a22d2d7..9822768 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -90,6 +90,10 @@ private:
#include <auth/tests/example_base_inc.cc>
#include <auth/tests/example_nsec3_inc.cc>
+// This SOA is used in negative responses; its RRTTL is set to SOA's MINTTL
+const char* const soa_minttl_txt =
+ "example.com. 0 IN SOA . . 1 0 0 0 0\n";
+
// This is used only in one pathological test case.
const char* const zone_ds_txt =
"example.com. 3600 IN DS 57855 5 1 "
@@ -1207,7 +1211,7 @@ TEST_P(QueryTest, nodomainANY) {
EXPECT_NO_THROW(query.process(*list_, Name("nxdomain.example.com"),
RRType::ANY(), response));
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_finder->getOrigin());
+ NULL, soa_minttl_txt, NULL, mock_finder->getOrigin());
}
// This tests that when we need to look up Zone's apex NS records for
@@ -1345,7 +1349,7 @@ TEST_P(QueryTest, nxdomain) {
Name("nxdomain.example.com"), qtype,
response));
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_finder->getOrigin());
+ NULL, soa_minttl_txt, NULL, mock_finder->getOrigin());
}
TEST_P(QueryTest, nxdomainWithNSEC) {
@@ -1356,8 +1360,8 @@ TEST_P(QueryTest, nxdomainWithNSEC) {
Name("nxdomain.example.com"), qtype,
response, true));
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
- NULL, (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ NULL, (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_nxdomain_txt) + "\n" +
string("noglue.example.com. 3600 IN RRSIG ") +
@@ -1382,8 +1386,8 @@ TEST_P(QueryTest, nxdomainWithNSEC2) {
query.process(*list_, Name("(.no.example.com"), qtype, response,
true);
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
- NULL, (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ NULL, (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_mx_txt) + "\n" +
string("mx.example.com. 3600 IN RRSIG ") +
@@ -1407,8 +1411,8 @@ TEST_P(QueryTest, nxdomainWithNSECDuplicate) {
query.process(*list_, Name("nx.no.example.com"), qtype, response,
true);
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 4, 0,
- NULL, (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ NULL, (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_no_txt) + "\n" +
string(").no.example.com. 3600 IN RRSIG ") +
@@ -1474,8 +1478,8 @@ TEST_F(QueryTestForMockOnly, nxdomainBadNSEC5) {
query.process(*list_, Name("nxdomain.example.com"), qtype,
response, true);
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
- NULL, (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ NULL, (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_nxdomain_txt) + "\n" +
string("noglue.example.com. 3600 IN RRSIG ") +
@@ -1503,7 +1507,7 @@ TEST_P(QueryTest, nxrrset) {
RRType::TXT(), response));
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_finder->getOrigin());
+ NULL, soa_minttl_txt, NULL, mock_finder->getOrigin());
}
TEST_P(QueryTest, nxrrsetWithNSEC) {
@@ -1513,7 +1517,8 @@ TEST_P(QueryTest, nxrrsetWithNSEC) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_www_txt) + "\n" +
string("www.example.com. 3600 IN RRSIG ") +
@@ -1534,7 +1539,8 @@ TEST_P(QueryTest, emptyNameWithNSEC) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_mx_txt) + "\n" +
string("mx.example.com. 3600 IN RRSIG ") +
@@ -1550,7 +1556,8 @@ TEST_P(QueryTest, nxrrsetWithoutNSEC) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 2, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n").c_str(),
NULL, mock_finder->getOrigin());
}
@@ -1706,7 +1713,8 @@ TEST_P(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_wild_txt) +
string("*.wild.example.com. 3600 IN RRSIG ") +
@@ -1729,7 +1737,8 @@ TEST_P(QueryTest, wildcardNxrrsetWithNSEC) {
RRType::TXT(), response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_wild_txt_nxrrset) +
string("*.uwild.example.com. 3600 IN RRSIG ") +
@@ -1753,7 +1762,8 @@ TEST_P(QueryTest, wildcardNxrrsetWithNSEC3) {
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 8, 0, NULL,
// SOA + its RRSIG
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
// NSEC3 for the closest encloser + its RRSIG
string(nsec3_uwild_txt) +
@@ -1816,7 +1826,8 @@ TEST_P(QueryTest, wildcardEmptyWithNSEC) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_empty_prev_txt) +
string("t.example.com. 3600 IN RRSIG ") +
@@ -2043,7 +2054,7 @@ TEST_P(QueryTest, DNAME_NX_RRSET) {
RRType::TXT(), response));
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
- NULL, soa_txt, NULL, mock_finder->getOrigin());
+ NULL, soa_minttl_txt, NULL, mock_finder->getOrigin());
}
/*
@@ -2307,8 +2318,8 @@ TEST_P(QueryTest, dsAboveDelegationNoData) {
RRType::DS(), response, true));
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(unsigned_delegation_nsec_txt) +
"unsigned-delegation.example.com. 3600 IN RRSIG " +
@@ -2324,7 +2335,8 @@ TEST_P(QueryTest, dsBelowDelegation) {
RRType::DS(), response, true));
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec_apex_txt) + "\n" +
string("example.com. 3600 IN RRSIG ") +
@@ -2342,7 +2354,8 @@ TEST_P(QueryTest, dsBelowDelegationWithDS) {
RRType::DS(), response, true));
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 2, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA")).c_str(), NULL,
mock_finder->getOrigin());
}
@@ -2382,9 +2395,10 @@ TEST_F(QueryTestForMockOnly, dsAtGrandParentAndChild) {
memory_client.addZone(ZoneFinderPtr(
new AlternateZoneFinder(childname)));
query.process(*list_, childname, RRType::DS(), response, true);
+ // Note that RR TTL of SOA and its RRSIG are set to SOA MINTTL, 0
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (childname.toText() + " 3600 IN SOA . . 0 0 0 0 0\n" +
- childname.toText() + " 3600 IN RRSIG " +
+ (childname.toText() + " 0 IN SOA . . 0 0 0 0 0\n" +
+ childname.toText() + " 0 IN RRSIG " +
getCommonRRSIGText("SOA") + "\n" +
childname.toText() + " 3600 IN NSEC " +
childname.toText() + " SOA NSEC RRSIG\n" +
@@ -2404,9 +2418,10 @@ TEST_F(QueryTestForMockOnly, dsAtRoot) {
new AlternateZoneFinder(Name::ROOT_NAME())));
query.process(*list_, Name::ROOT_NAME(), RRType::DS(), response,
true);
+ // Note that RR TTL of SOA and its RRSIG are set to SOA MINTTL, 0
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(". 3600 IN SOA . . 0 0 0 0 0\n") +
- ". 3600 IN RRSIG " + getCommonRRSIGText("SOA") + "\n" +
+ (string(". 0 IN SOA . . 0 0 0 0 0\n") +
+ ". 0 IN RRSIG " + getCommonRRSIGText("SOA") + "\n" +
". 3600 IN NSEC " + ". SOA NSEC RRSIG\n" +
". 3600 IN RRSIG " +
getCommonRRSIGText("NSEC")).c_str(), NULL);
@@ -2443,7 +2458,8 @@ TEST_P(QueryTest, nxrrsetWithNSEC3) {
response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec3_www_txt) + "\n" +
nsec3_hash_.calculate(Name("www.example.com.")) +
@@ -2478,7 +2494,8 @@ TEST_P(QueryTest, nxrrsetWithNSEC3_ds_exact) {
query.process(*list_, Name("unsigned-delegation.example.com."),
RRType::DS(), response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(unsigned_delegation_nsec3_txt) + "\n" +
nsec3_hash_.calculate(
@@ -2500,7 +2517,8 @@ TEST_P(QueryTest, nxrrsetWithNSEC3_ds_no_exact) {
query.process(*list_, Name("unsigned-delegation-optout.example.com."),
RRType::DS(), response, true);
responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
- (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
string(nsec3_apex_txt) + "\n" +
nsec3_hash_.calculate(Name("example.com.")) +
@@ -2528,8 +2546,8 @@ TEST_P(QueryTest, nxdomainWithNSEC3Proof) {
response, true);
responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 8, 0, NULL,
// SOA + its RRSIG
- (string(soa_txt) +
- string("example.com. 3600 IN RRSIG ") +
+ (string(soa_minttl_txt) +
+ string("example.com. 0 IN RRSIG ") +
getCommonRRSIGText("SOA") + "\n" +
// NSEC3 for the closest encloser + its RRSIG
string(nsec3_apex_txt) + "\n" +
diff --git a/src/bin/bind10/.gitignore b/src/bin/bind10/.gitignore
index 2cf6b50..ce6632d 100644
--- a/src/bin/bind10/.gitignore
+++ b/src/bin/bind10/.gitignore
@@ -1,4 +1,3 @@
-/bind10
-/bind10_src.py
+/b10-init.py
/run_bind10.sh
/bind10.8
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 86c6595..728fc4a 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,29 +1,33 @@
SUBDIRS = . tests
sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10_src.pyc
-CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
-CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
+pkglibexec_SCRIPTS = b10-init
+CLEANFILES = b10-init b10-init.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.pyc
pkglibexecdir = $(libexecdir)/@PACKAGE@
-nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
pylogmessagedir = $(pyexecdir)/isc/log_messages/
noinst_SCRIPTS = run_bind10.sh
bind10dir = $(pkgdatadir)
-bind10_DATA = bob.spec
-EXTRA_DIST = bob.spec
+bind10_DATA = init.spec
+EXTRA_DIST = init.spec bind10.in
-man_MANS = bind10.8
-DISTCLEANFILES = $(man_MANS)
-EXTRA_DIST += $(man_MANS) bind10.xml bind10_messages.mes
+man_MANS = b10-init.8 bind10.8
+DISTCLEANFILES = $(man_MANS) bind10
+EXTRA_DIST += $(man_MANS) b10-init.xml bind10.xml init_messages.mes
if GENERATE_DOCS
bind10.8: bind10.xml
- @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/bind10.xml
+ @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/bind10.xml
+
+b10-init.8: b10-init.xml
+ @XSLTPROC@ --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-init.xml
#dist-local-check-mans-enabled:
# @if grep "Man generation disabled" $(man_MANS) >/dev/null; then $(RM) $(man_MANS); fi
@@ -40,15 +44,15 @@ $(man_MANS):
endif
-$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py : init_messages.mes
$(top_builddir)/src/lib/log/compiler/message \
- -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/init_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+b10-init: init.py $(PYTHON_LOGMSGPKG_DIR)/work/init_messages.py
$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-e "s|@@LIBDIR@@|$(libdir)|" \
- -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" init.py >$@
chmod a+x $@
pytest:
diff --git a/src/bin/bind10/README b/src/bin/bind10/README
index e1d2d89..d75c0cd 100644
--- a/src/bin/bind10/README
+++ b/src/bin/bind10/README
@@ -1,11 +1,12 @@
-This directory contains the source for the "Boss of Bind" program.
+This directory contains the source for the "b10-init" program, as well as
+the "bind10" script that runs it.
Files:
Makefile.am - build information
README - this file
TODO - remaining development tasks for this program
bind10.py.in - used to make bind10.py with proper Python paths
- bob.spec - defines the options and commands
+ init.spec - defines the options and commands
run_bind10.sh.in - use to make run_bind10.sh with proper Python paths
The "tests" directory contains unit tests for the application.
diff --git a/src/bin/bind10/b10-init.xml b/src/bin/bind10/b10-init.xml
new file mode 100644
index 0000000..f166683
--- /dev/null
+++ b/src/bin/bind10/b10-init.xml
@@ -0,0 +1,513 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+ [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+ <refentryinfo>
+ <date>February 5, 2013</date>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>b10-init</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo>BIND10</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>b10-init</refname>
+ <refpurpose>BIND 10 Init process</refpurpose>
+ </refnamediv>
+
+ <docinfo>
+ <copyright>
+ <year>2010-2013</year>
+ <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+ </copyright>
+ </docinfo>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>b10-init</command>
+ <arg><option>-c <replaceable>config-filename</replaceable></option></arg>
+ <arg><option>-i</option></arg>
+ <arg><option>-m <replaceable>file</replaceable></option></arg>
+ <arg><option>-p <replaceable>data_path</replaceable></option></arg>
+ <arg><option>-u <replaceable>user</replaceable></option></arg>
+ <arg><option>-v</option></arg>
+ <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
+ <arg><option>--clear-config</option></arg>
+ <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
+ <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
+ <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
+ <arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
+ <arg><option>--no-kill</option></arg>
+ <arg><option>--pid-file</option> <replaceable>filename</replaceable></arg>
+ <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
+ <arg><option>--user <replaceable>user</replaceable></option></arg>
+ <arg><option>--verbose</option></arg>
+ <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>The <command>b10-init</command> daemon starts up the other
+ required BIND 10 daemons. It handles restarting programs that exit
+ unexpectedly and the shutdown of all managed daemons.</para>
+
+<!-- TODO: list what it starts here? -->
+
+<!-- TODO
+ <para>The configuration of the <command>b10-init</command> daemon
+ is defined in the TODO configuration file, as described in the
+ <citerefentry><refentrytitle>TODO</refentrytitle><manvolnum>5</manvolnum></citerefentry>
+ manual page.</para>
+-->
+
+ </refsect1>
+
+ <refsect1>
+ <title>ARGUMENTS</title>
+
+ <para>The arguments are as follows:</para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>
+ <option>-c</option> <replaceable>config-filename</replaceable>,
+ <option>--config-file</option> <replaceable>config-filename</replaceable>
+ </term>
+ <listitem>
+ <para>The configuration filename to use. It can be either absolute or
+ relative to the data path; if it is absolute, the data path is
+ not used.
+ Defaults to <filename>b10-config.db</filename>.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--clear-config</option>
+ </term>
+ <listitem>
+ <para>
+ This will create a backup of the existing configuration
+ file, remove it and start
+ <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
+ with the default configuration.
+ The name of the backup file can be found in the logs
+ (<varname>CFGMGR_BACKED_UP_CONFIG_FILE</varname>).
+ (It will append a number to the backup filename if a
+ previous backup file exists.)
+
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--cmdctl-port</option> <replaceable>port</replaceable>
+ </term>
+ <listitem>
+ <para>The <command>b10-cmdctl</command> daemon will listen
+ on this port.
+ (See
+ <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
+ for the default.)
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>-p</option> <replaceable>directory</replaceable>,
+ <option>--data-path</option> <replaceable>directory</replaceable>
+ </term>
+ <listitem>
+ <para>The path where BIND 10 programs look for various data files.
+ Currently only
+ <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ uses it to locate the configuration file, but the usage
+ might be extended for other programs and other types of
+ files.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-m</option> <replaceable>file</replaceable>,
+ <option>--msgq-socket-file</option> <replaceable>file</replaceable></term>
+
+ <listitem>
+ <para>The UNIX domain socket file for the
+ <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ daemon to use.
+ The default is
+ <filename>/usr/local/var/bind10/msg_socket</filename>.
+<!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-i</option>, <option>--no-kill</option></term>
+ <listitem>
+ <para>When this option is passed, <command>b10-init</command>
+ does not send SIGTERM and SIGKILL signals to modules during
+ shutdown. (This option was introduced for use during
+ testing.)</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
+<!-- TODO: example more detail. -->
+ <listitem>
+ <para>The username for <command>b10-init</command> to run as.
+ <command>b10-init</command> must initially be run as the
+ root user to use this option.
+ The default is to run as the current user.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>--pid-file</option> <replaceable>filename</replaceable></term>
+ <listitem>
+ <para>If defined, the PID of the <command>b10-init</command> process is stored
+ in this file.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>--pretty-name <replaceable>name</replaceable></option></term>
+
+ <listitem>
+ <para>The name this process should have in tools like
+ <command>ps</command> or <command>top</command>. This
+ is handy if you have multiple versions/installations
+ of <command>b10-init</command>.
+<!-- TODO: only supported with setproctitle feature
+The default is the basename of ARG 0.
+-->
+</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-v</option>, <option>--verbose</option></term>
+ <listitem>
+ <para>Display more about what is going on for
+ <command>b10-init</command> and its child processes.</para>
+<!-- TODO: not true about all children yet -->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
+ <listitem>
+ <para>Sets the amount of time that BIND 10 will wait for
+ the configuration manager (a key component of BIND 10)
+ to initialize itself before abandoning the start up and
+ terminating with an error. The
+ <replaceable>wait_time</replaceable> is specified in
+ seconds and has a default value of 10.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect1>
+
+<!--
+TODO: configuration section
+-->
+
+ <refsect1>
+ <title>CONFIGURATION AND COMMANDS</title>
+
+ <para>
+ The configuration provides settings for components for
+ <command>b10-init</command> to manage under
+ <varname>/Init/components/</varname>.
+ The default elements are:
+ </para>
+
+ <itemizedlist>
+
+ <listitem>
+ <para> <varname>/Init/components/b10-cmdctl</varname> </para>
+ </listitem>
+
+ <listitem>
+ <para> <varname>/Init/components/b10-stats</varname> </para>
+ </listitem>
+
+ </itemizedlist>
+
+ <para>
+ (Note that the startup of <command>b10-sockcreator</command>,
+ <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+ is not configurable. They are hardcoded and <command>b10-init</command>
+ will not run without them.)
+ </para>
+
+ <para>
+ The named sets for components contain the following settings:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><varname>address</varname></term>
+ <listitem>
+ <para>The name used for communicating with it on the message
+ bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><varname>kind</varname></term>
+ <listitem>
+ <para>
+ This defines how required a component is.
+ The possible settings for <varname>kind</varname> are:
+ <varname>core</varname> (the system will not start if the
+ component fails to start, and <command>b10-init</command> will
+ shut down if a <quote>core</quote> component crashes),
+ <varname>dispensable</varname> (<command>b10-init</command>
+ will restart a failing component),
+ and
+ <varname>needed</varname> (<command>b10-init</command>
+ will shut down if the component does not start initially, but
+ if it crashes later, <command>b10-init</command> will attempt
+ to restart it).
+ This setting is required.
+<!-- TODO: formatting -->
+ </para>
+ </listitem>
+ </varlistentry>
+
+<!--
+TODO: currently not used
+ <varlistentry>
+ <term> <varname>params</varname> </term>
+ <listitem>
+ <para>
+list
+</para>
+ </listitem>
+ </varlistentry>
+-->
+
+ <varlistentry>
+ <term> <varname>priority</varname> </term>
+ <listitem>
+ <para>This is an integer. <command>b10-init</command>
+ will start the components with the largest priority numbers first.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>process</varname> </term>
+ <listitem>
+ <para>This is the filename of the executable to be started.
+ If not defined, then <command>b10-init</command> will
+ use the component name instead.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term> <varname>special</varname> </term>
+ <listitem>
+ <para>
+ This defines whether the component is started in a special,
+ hardcoded way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+sockcreator
+xfrin
+-->
+
+</para>
+ </listitem>
+ </varlistentry>
+
+ </variablelist>
+
+<!-- TODO: formatting -->
+ <para>
+ The <varname>Init</varname> configuration commands are:
+ </para>
+
+<!-- TODO -->
+<!--
+ <para>
+ <command>drop_socket</command>
+ This is an internal command and not exposed to the administrator.
+ </para>
+-->
+
+<!-- TODO -->
+<!--
+ <para>
+ <command>get_socket</command>
+ This is an internal command and not exposed to the administrator.
+ </para>
+-->
+
+ <para>
+ <command>getstats</command> tells <command>b10-init</command>
+ to send its statistics data to the <command>b10-stats</command>
+ daemon.
+ This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+ </para>
+
+ <para>
+ <command>ping</command> is used to check the connection with the
+ <command>b10-init</command> daemon.
+ It returns the text <quote>pong</quote>.
+ </para>
+
+ <para>
+ <command>show_processes</command> lists the current processes
+ managed by <command>b10-init</command>.
+ The output is an array in JSON format containing the process
+ ID, the name, and the message bus address of each managed process.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+ </para>
+
+ <para>
+ <command>shutdown</command> tells <command>b10-init</command>
+ to shut down the BIND 10 servers.
+ It will tell each process it manages to shut down and, when
+ complete, <command>b10-init</command> will exit.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>STATISTICS DATA</title>
+
+ <para>
+ The statistics data collected by the <command>b10-stats</command>
+ daemon for <quote>Init</quote> include:
+ </para>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>boot_time</term>
+ <listitem><para>
+ The date and time that the <command>b10-init</command>
+ process started.
+ This is represented in ISO 8601 format.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+
+ </refsect1>
+
+ <refsect1>
+ <title>FILES</title>
+ <para><filename>sockcreator-XXXXXX/sockcreator</filename>
+ —
+ the Unix Domain socket located in a temporary file directory for
+ <command>b10-sockcreator</command>
+<!-- <citerefentry><refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum></citerefentry> -->
+ communication.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>SEE ALSO</title>
+ <para>
+ <citerefentry>
+ <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-zonemgr</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citerefentry>
+ <refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>,
+ <citetitle>BIND 10 Guide</citetitle>.
+ </para>
+ </refsect1>
+<!-- <citerefentry>
+ <refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum>
+ </citerefentry>, -->
+
+ <refsect1 id='history'><title>HISTORY</title>
+ <para>The development of <command>b10-init</command>
+ was started in October 2009.
+ It was renamed and its configuration identifier changed
+ in February 2013.
+ </para>
+ </refsect1>
+
+ <refsect1>
+ <title>AUTHORS</title>
+ <para>
+ The <command>b10-init</command>
+ daemon was initially designed by Shane Kerr of ISC.
+ </para>
+ </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->
diff --git a/src/bin/bind10/bind10.in b/src/bin/bind10/bind10.in
new file mode 100755
index 0000000..88c45c9
--- /dev/null
+++ b/src/bin/bind10/bind10.in
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+# We use this wrapper script both for production and in-source tests; in
+# the latter case the B10_FROM_BUILD environment variable is expected to be defined.
+if test -n "${B10_FROM_BUILD}"; then
+ exec ${B10_FROM_BUILD}/src/bin/bind10/b10-init $*
+else
+ prefix=@prefix@
+ exec_prefix=@exec_prefix@
+ exec @libexecdir@/@PACKAGE@/b10-init $*
+fi
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index e32544a..16082f3 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY mdash "—">]>
<!--
- - Copyright (C) 2010-2012 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>April 12, 2012</date>
+ <date>February 5, 2013</date>
</refentryinfo>
<refmeta>
@@ -31,12 +31,12 @@
<refnamediv>
<refname>bind10</refname>
- <refpurpose>BIND 10 boss process</refpurpose>
+ <refpurpose>BIND 10 start script</refpurpose>
</refnamediv>
<docinfo>
<copyright>
- <year>2010-2012</year>
+ <year>2013</year>
<holder>Internet Systems Consortium, Inc. ("ISC")</holder>
</copyright>
</docinfo>
@@ -44,468 +44,33 @@
<refsynopsisdiv>
<cmdsynopsis>
<command>bind10</command>
- <arg><option>-c <replaceable>config-filename</replaceable></option></arg>
- <arg><option>-i</option></arg>
- <arg><option>-m <replaceable>file</replaceable></option></arg>
- <arg><option>-p <replaceable>data_path</replaceable></option></arg>
- <arg><option>-u <replaceable>user</replaceable></option></arg>
- <arg><option>-v</option></arg>
- <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
- <arg><option>--clear-config</option></arg>
- <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
- <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
- <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
- <arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
- <arg><option>--no-kill</option></arg>
- <arg><option>--pid-file</option> <replaceable>filename</replaceable></arg>
- <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
- <arg><option>--user <replaceable>user</replaceable></option></arg>
- <arg><option>--verbose</option></arg>
- <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
+ <arg><option>options</option></arg>
</cmdsynopsis>
</refsynopsisdiv>
<refsect1>
<title>DESCRIPTION</title>
- <para>The <command>bind10</command> daemon starts up other
- BIND 10 required daemons. It handles restarting of exiting
- programs and also the shutdown of all managed daemons.</para>
+ <para>The <command>bind10</command> script is a simple wrapper that
+ starts BIND 10 by running the <command>b10-init</command> daemon. All
+ options passed to <command>bind10</command> are directly passed on to
+ <command>b10-init</command>.</para>
-<!-- TODO: list what it starts here? -->
-
-<!-- TODO
- <para>The configuration of the <command>bind10</command> daemon
- is defined in the TODO configuration file, as described in the
- <citerefentry><refentrytitle>TODO</refentrytitle><manvolnum>5</manvolnum></citerefentry>
- manual page.</para>
--->
-
- </refsect1>
-
- <refsect1>
- <title>ARGUMENTS</title>
-
- <para>The arguments are as follows:</para>
-
- <variablelist>
-
- <varlistentry>
- <term>
- <option>-c</option> <replaceable>config-filename</replaceable>,
- <option>--config-file</option> <replaceable>config-filename</replaceable>
- </term>
- <listitem>
- <para>The configuration filename to use. Can be either absolute or
- relative to data path. In case it is absolute, value of data path is
- not considered.
- Defaults to <filename>b10-config.db</filename>.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>--clear-config</option>
- </term>
- <listitem>
- <para>
- This will create a backup of the existing configuration
- file, remove it and start
- <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
- with the default configuration.
- The name of the backup file can be found in the logs
- (<varname>CFGMGR_BACKED_UP_CONFIG_FILE</varname>).
- (It will append a number to the backup filename if a
- previous backup file exists.)
-
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>--cmdctl-port</option> <replaceable>port</replaceable>
- </term>
- <listitem>
- <para>The <command>b10-cmdctl</command> daemon will listen
- on this port.
- (See
- <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
- for the default.)
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term>
- <option>-p</option> <replaceable>directory</replaceable>,
- <option>--data-path</option> <replaceable>directory</replaceable>
- </term>
- <listitem>
- <para>The path where BIND 10 programs look for various data files.
- Currently only
- <citerefentry><refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- uses it to locate the configuration file, but the usage
- might be extended for other programs and other types of
- files.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-m</option> <replaceable>file</replaceable>,
- <option>--msgq-socket-file</option> <replaceable>file</replaceable></term>
-
- <listitem>
- <para>The UNIX domain socket file for the
- <citerefentry><refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- daemon to use.
- The default is
- <filename>/usr/local/var/bind10/msg_socket</filename>.
-<!-- @localstatedir@/@PACKAGE_NAME@/msg_socket -->
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-i</option>, <option>--no-kill</option></term>
- <listitem>
- <para>When this option is passed, <command>bind10</command>
- does not send SIGTERM and SIGKILL signals to modules during
- shutdown. (This option was introduced for use during
- testing.)</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
-<!-- TODO: example more detail. -->
- <listitem>
- <para>The username for <command>bind10</command> to run as.
- <command>bind10</command> must be initially ran as the
- root user to use this option.
- The default is to run as the current user.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--pid-file</option> <replaceable>filename</replaceable></term>
- <listitem>
- <para>If defined, the PID of the <command>bind10</command> is stored
- in this file.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--pretty-name <replaceable>name</replaceable></option></term>
-
- <listitem>
- <para>The name this process should have in tools like
- <command>ps</command> or <command>top</command>. This
- is handy if you have multiple versions/installations
- of <command>bind10</command>.
-<!-- TODO: only supported with setproctitle feature
-The default is the basename of ARG 0.
--->
-</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-v</option>, <option>--verbose</option></term>
- <listitem>
- <para>Display more about what is going on for
- <command>bind10</command> and its child processes.</para>
-<!-- TODO: not true about all children yet -->
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
- <listitem>
- <para>Sets the amount of time that BIND 10 will wait for
- the configuration manager (a key component of BIND 10)
- to initialize itself before abandoning the start up and
- terminating with an error. The
- <replaceable>wait_time</replaceable> is specified in
- seconds and has a default value of 10.
- </para>
- </listitem>
- </varlistentry>
-
- </variablelist>
- </refsect1>
-
-<!--
-TODO: configuration section
--->
-
- <refsect1>
- <title>CONFIGURATION AND COMMANDS</title>
-
- <para>
- The configuration provides settings for components for
- <command>bind10</command> to manage under
- <varname>/Boss/components/</varname>.
- The default elements are:
- </para>
-
- <itemizedlist>
-
- <listitem>
- <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
- </listitem>
-
- <listitem>
- <para> <varname>/Boss/components/b10-stats</varname> </para>
- </listitem>
-
- </itemizedlist>
-
- <para>
- (Note that the startup of <command>b10-sockcreator</command>,
- <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
- is not configurable. They are hardcoded and <command>bind10</command>
- will not run without them.)
- </para>
-
- <para>
- The named sets for components contain the following settings:
- </para>
-
- <variablelist>
-
- <varlistentry>
- <term><varname>address</varname></term>
- <listitem>
- <para>The name used for communicating to it on the message
- bus.</para>
-<!-- NOTE: vorner said:
-These can be null, because the components are special ones, and
-the special class there already knows the address. It is (I hope)
-explained in the guide. I'd like to get rid of the special components
-sometime and I'd like it to teach to guess the address.
--->
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><varname>kind</varname></term>
- <listitem>
- <para>
- This defines how required a component is.
- The possible settings for <varname>kind</varname> are:
- <varname>core</varname> (system won't start if it won't
- start and <command>bind10</command> will shutdown if
- a <quote>core</quote> component crashes),
- <varname>dispensable</varname> (<command>bind10</command>
- will restart failing component),
- and
- <varname>needed</varname> (<command>bind10</command>
- will shutdown if component won't initially start, but
- if crashes later, it will attempt to restart).
- This setting is required.
-<!-- TODO: formatting -->
- </para>
- </listitem>
- </varlistentry>
-
-<!--
-TODO: currently not used
- <varlistentry>
- <term> <varname>params</varname> </term>
- <listitem>
- <para>
-list
-</para>
- </listitem>
- </varlistentry>
--->
-
- <varlistentry>
- <term> <varname>priority</varname> </term>
- <listitem>
- <para>This is an integer. <command>bind10</command>
- will start the components with largest priority numbers first.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term> <varname>process</varname> </term>
- <listitem>
- <para>This is the filename of the executable to be started.
- If not defined, then <command>bind10</command> will
- use the component name instead.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term> <varname>special</varname> </term>
- <listitem>
- <para>
- This defines if the component is started a special, hardcoded
- way.
-<!--
-TODO: document this ... but maybe some of these will be removed
-once we get rid of some using switches for components?
-
-auth
-cfgmgr
-cmdctl
-msgq
-resolver
-sockcreator
-xfrin
--->
-
-</para>
- </listitem>
- </varlistentry>
-
- </variablelist>
-
-<!-- TODO: formating -->
- <para>
- The <varname>Boss</varname> configuration commands are:
- </para>
-<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
-
-<!-- TODO -->
-<!--
- <para>
- <command>drop_socket</command>
- This is an internal command and not exposed to the administrator.
- </para>
--->
-
-<!-- TODO -->
-<!--
- <para>
- <command>get_socket</command>
- This is an internal command and not exposed to the administrator.
- </para>
--->
-
- <para>
- <command>getstats</command> tells <command>bind10</command>
- to send its statistics data to the <command>b10-stats</command>
- daemon.
- This is an internal command and not exposed to the administrator.
-<!-- not defined in spec -->
- </para>
-
- <para>
- <command>ping</command> is used to check the connection with the
- <command>bind10</command> daemon.
- It returns the text <quote>pong</quote>.
- </para>
-
- <para>
- <command>show_processes</command> lists the current processes
- managed by <command>bind10</command>.
- The output is an array in JSON format containing the process
- ID, the name for each and the address name used on each message bus.
-<!-- TODO: what is name? -->
-<!-- TODO: change to JSON object format? -->
-<!-- TODO: ticket #1406 -->
- </para>
-
- <para>
- <command>shutdown</command> tells <command>bind10</command>
- to shutdown the BIND 10 servers.
- It will tell each process it manages to shutdown and, when
- complete, <command>bind10</command> will exit.
- </para>
-
- </refsect1>
-
- <refsect1>
- <title>STATISTICS DATA</title>
-
- <para>
- The statistics data collected by the <command>b10-stats</command>
- daemon for <quote>Boss</quote> include:
- </para>
-
- <variablelist>
-
- <varlistentry>
- <term>boot_time</term>
- <listitem><para>
- The date and time that the <command>bind10</command>
- process started.
- This is represented in ISO 8601 format.
- </para></listitem>
- </varlistentry>
-
- </variablelist>
-
- </refsect1>
-
- <refsect1>
- <title>FILES</title>
- <para><filename>sockcreator-XXXXXX/sockcreator</filename>
- —
- the Unix Domain socket located in a temporary file directory for
- <command>b10-sockcreator</command>
-<!-- <citerefentry><refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum></citerefentry> -->
- communication.
- </para>
</refsect1>
<refsect1>
<title>SEE ALSO</title>
<para>
<citerefentry>
- <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-auth</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-cfgmgr</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-cmdctl</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-msgq</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-xfrin</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-xfrout</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-zonemgr</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>,
- <citerefentry>
- <refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum>
+ <refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum>
</citerefentry>,
<citetitle>BIND 10 Guide</citetitle>.
</para>
</refsect1>
-<!-- <citerefentry>
- <refentrytitle>b10-sockcreator</refentrytitle><manvolnum>8</manvolnum>
- </citerefentry>, -->
<refsect1 id='history'><title>HISTORY</title>
- <para>The development of <command>bind10</command>
- was started in October 2009.</para>
- </refsect1>
-
- <refsect1>
- <title>AUTHORS</title>
- <para>
- The <command>bind10</command>
- daemon was initially designed by Shane Kerr of ISC.
+ <para>The <command>bind10</command> script was added in February 2013.
</para>
</refsect1>
-</refentry><!--
- - Local variables:
- - mode: sgml
- - End:
--->
+
+</refentry>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
deleted file mode 100644
index 9414ed6..0000000
--- a/src/bin/bind10/bind10_messages.mes
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# No namespace declaration - these constants go in the global namespace
-# of the xfrin messages python module.
-
-% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
-The boss process is starting up and will now check if the message bus
-daemon is already running. If so, it will not be able to start, as it
-needs a dedicated message bus.
-
-% BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3
-The process terminated, but the bind10 boss didn't expect it to, which means
-it must have failed.
-
-% BIND10_COMPONENT_RESTART component %1 is about to restart
-The named component failed previously and we will try to restart it to provide
-as flawless service as possible, but it should be investigated what happened,
-as it could happen again.
-
-% BIND10_COMPONENT_START component %1 is starting
-The named component is about to be started by the boss process.
-
-% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
-An exception (mentioned in the message) happened during the startup of the
-named component. The componet is not considered started and further actions
-will be taken about it.
-
-% BIND10_COMPONENT_STOP component %1 is being stopped
-A component is about to be asked to stop willingly by the boss.
-
-% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
-A component failed for some reason (see previous messages). It is either a core
-component or needed component that was just started. In any case, the system
-can't continue without it and will terminate.
-
-% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
-A debug message. This indicates that the configurator is building a plan
-how to change configuration from the older one to newer one. This does no
-real work yet, it just does the planning what needs to be done.
-
-% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
-There was an exception during some planned task. The plan will not continue and
-only some tasks of the plan were completed. The rest is aborted. The exception
-will be propagated.
-
-% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
-A different configuration of which components should be running is being
-installed. All components that are no longer needed will be stopped and
-newly introduced ones started. This happens at startup, when the configuration
-is read the first time, or when an operator changes configuration of the boss.
-
-% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
-A debug message. The configurator is about to execute a plan of actions it
-computed previously.
-
-% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
-The part that cares about starting and stopping the right component from the
-boss process is starting up. This happens only once at the startup of the
-boss process. It will start the basic set of processes now (the ones boss
-needs to read the configuration), the rest will be started after the
-configuration is known.
-
-% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
-The part that cares about starting and stopping processes in the boss is
-shutting down. All started components will be shut down now (more precisely,
-asked to terminate by their own, if they fail to comply, other parts of
-the boss process will try to force them).
-
-% BIND10_CONFIGURATOR_TASK performing task %1 on %2
-A debug message. The configurator is about to perform one task of the plan it
-is currently executing on the named component.
-
-% BIND10_CONNECTING_TO_CC_FAIL failed to connect to configuration/command channel; try -v to see output from msgq
-The boss process tried to connect to the communication channel for
-commands and configuration updates during initialization, but it
-failed. This is a fatal startup error, and process will soon
-terminate after some cleanup. There can be several reasons for the
-failure, but the most likely cause is that the msgq daemon failed to
-start, and the most likely cause of the msgq failure is that it
-doesn't have a permission to create a socket file for the
-communication. To confirm that, you can see debug messages from msgq
-by starting BIND 10 with the -v command line option. If it indicates
-permission problem for msgq, make sure the directory where the socket
-file is to be created is writable for the msgq process. Note that if
-you specify the -u option to change process users, the directory must
-be writable for that user.
-
-% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
-An error was encountered when the boss module specified
-statistics data which is invalid for the boss specification file.
-
-% BIND10_INVALID_USER invalid user: %1
-The boss process was started with the -u option, to drop root privileges
-and continue running as the specified user, but the user is unknown.
-
-% BIND10_KILLING_ALL_PROCESSES killing all started processes
-The boss module was not able to start every process it needed to start
-during startup, and will now kill the processes that did get started.
-
-% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
-A connection from one of the applications which requested a socket was
-closed. This means the application has terminated, so all the sockets it was
-using are now closed and bind10 process can release them as well, unless the
-same sockets are used by yet another application.
-
-% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
-There already appears to be a message bus daemon running. Either an
-old process was not shut down correctly, and needs to be killed, or
-another instance of BIND10, with the same msgq domain socket, is
-running, which needs to be stopped.
-
-% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
-While listening on the message bus channel for messages, it suddenly
-disappeared. The msgq daemon may have died. This might lead to an
-inconsistent state of the system, and BIND 10 will now shut down.
-
-% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
-An error occurred when the bind10 process was asked to send a socket file
-descriptor. The error is mentioned, most common reason is that the request
-is invalid and may not come from bind10 process at all.
-
-% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
-This indicates a process started previously terminated. The process id
-and component owning the process are indicated, as well as the exit code.
-This doesn't distinguish if the process was supposed to terminate or not.
-
-% BIND10_READING_BOSS_CONFIGURATION reading boss configuration
-The boss process is starting up, and will now process the initial
-configuration, as received from the configuration manager.
-
-% BIND10_RECEIVED_COMMAND received command: %1
-The boss module received a command and shall now process it. The command
-is printed.
-
-% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
-The boss module received a configuration update and is going to apply
-it now. The new configuration is printed.
-
-% BIND10_RECEIVED_SIGNAL received signal %1
-The boss module received the given signal.
-
-% BIND10_RESTART_COMPONENT_SKIPPED Skipped restarting a component %1
-The boss module tried to restart a component after it failed (crashed)
-unexpectedly, but the boss then found that the component had been removed
-from its local configuration of components to run. This is an unusual
-situation but can happen if the administrator removes the component from
-the configuration after the component's crash and before the restart time.
-The boss module simply skipped restarting that module, and the whole system
-went back to the expected state (except that the crash itself is likely
-to be a bug).
-
-% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
-The given process has been restarted successfully, and is now running
-with the given process id.
-
-% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
-The given process has ended unexpectedly, and is now restarted.
-
-% BIND10_SELECT_ERROR error in select() call: %1
-There was a fatal error in the call to select(), used to see if a child
-process has ended or if there is a message on the message bus. This
-should not happen under normal circumstances and is considered fatal,
-so BIND 10 will now shut down. The specific error is printed.
-
-% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
-The boss module is sending a SIGKILL signal to the given process.
-
-% BIND10_SEND_SIGNAL_FAIL sending %1 to %2 (PID %3) failed: %4
-The boss module sent a single (either SIGTERM or SIGKILL) to a process,
-but it failed due to some system level error. There are two major cases:
-the target process has already terminated but the boss module had sent
-the signal before it noticed the termination. In this case an error
-message should indicate something like "no such process". This can be
-safely ignored. The other case is that the boss module doesn't have
-the privilege to send a signal to the process. It can typically
-happen when the boss module started as a privileged process, spawned a
-subprocess, and then dropped the privilege. It includes the case for
-the socket creator when the boss process runs with the -u command line
-option. In this case, the boss module simply gives up to terminate
-the process explicitly because it's unlikely to succeed by keeping
-sending the signal. Although the socket creator is implemented so
-that it will terminate automatically when the boss process exits
-(and that should be the case for any other future process running with
-a higher privilege), but it's recommended to check if there's any
-remaining BIND 10 process if this message is logged. For all other
-cases, the boss module will keep sending the signal until it confirms
-all child processes terminate. Although unlikely, this could prevent
-the boss module from exiting, just keeping sending the signals. So,
-again, it's advisable to check if it really terminates when this
-message is logged.
-
-% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
-The boss module is sending a SIGTERM signal to the given process.
-
-% BIND10_SETGID setting GID to %1
-The boss switches the process group ID to the given value. This happens
-when BIND 10 starts with the -u option, and the group ID will be set to
-that of the specified user.
-
-% BIND10_SETUID setting UID to %1
-The boss switches the user it runs as to the given UID.
-
-% BIND10_SHUTDOWN stopping the server
-The boss process received a command or signal telling it to shut down.
-It will send a shutdown command to each process. The processes that do
-not shut down will then receive a SIGTERM signal. If that doesn't work,
-it shall send SIGKILL signals to the processes still alive.
-
-% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
-All child processes have been stopped, and the boss process will now
-stop itself.
-
-% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
-The socket creator reported an error when creating a socket, but the function
-that failed is unknown (not one of 'S' for socket or 'B' for bind).
-
-% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
-The boss requested a socket from the creator, but the answer is unknown. This
-looks like a programmer error.
-
-% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
-There should be more data from the socket creator, but it closed the socket.
-It probably crashed.
-
-% BIND10_SOCKCREATOR_INIT initializing socket creator parser
-The boss module initializes routines for parsing the socket creator
-protocol.
-
-% BIND10_SOCKCREATOR_KILL killing the socket creator
-The socket creator is being terminated the aggressive way, by sending it
-SIGKILL. This should not normally happen.
-
-% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
-The boss module sends a request to terminate to the socket creator.
-
-% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
-Either sending or receiving data from the socket creator failed with the given
-error. The creator probably crashed or some serious OS-level problem happened,
-as the communication happens only on local host.
-
-% BIND10_SOCKET_CREATED successfully created socket %1
-The socket creator successfully created and sent a requested socket; it has
-the given file descriptor number.
-
-% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
-The socket creator failed to create the requested socket. It failed on the
-indicated OS API function with the given error.
-
-% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
-The boss forwards a request for a socket to the socket creator.
-
-% BIND10_STARTED_CC started configuration/command session
-Debug message given when BIND 10 has successfully started the object that
-handles configuration and commands.
-
-% BIND10_STARTED_PROCESS started %1
-The given process has successfully been started.
-
-% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
-The given process has successfully been started, and has the given PID.
-
-% BIND10_STARTING starting BIND10: %1
-Informational message on startup that shows the full version.
-
-% BIND10_STARTING_CC starting configuration/command session
-Informational message given when BIND 10 is starting the session object
-that handles configuration and commands.
-
-% BIND10_STARTING_PROCESS starting process %1
-The boss module is starting the given process.
-
-% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
-The boss module is starting the given process, which will listen on the
-given port number.
-
-% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
-The boss module is starting the given process, which will listen on the
-given address and port number (written as <address>#<port>).
-
-% BIND10_STARTUP_COMPLETE BIND 10 started
-All modules have been successfully started, and BIND 10 is now running.
-
-% BIND10_STARTUP_ERROR error during startup: %1
-There was a fatal error when BIND10 was trying to start. The error is
-shown, and BIND10 will now shut down.
-
-% BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1
-During the startup process, a number of messages are exchanged between the
-Boss process and the processes it starts. This error is output when a
-message received by the Boss process is recognised as being of the
-correct format but is unexpected. It may be that processes are starting
-out of sequence.
-
-% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
-During the startup process, a number of messages are exchanged between the
-Boss process and the processes it starts. This error is output when a
-message received by the Boss process is not recognised.
-
-% BIND10_STOP_PROCESS asking %1 to shut down
-The boss module is sending a shutdown command to the given module over
-the message channel.
-
-% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
-An unknown child process has exited. The PID is printed, but no further
-action will be taken by the boss process.
-
-% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
-The configuration manager process is so critical to operation of BIND 10
-that after starting it, the Boss module will wait for it to initialize
-itself before continuing. This debug message is produced during the
-wait and may be output zero or more times depending on how long it takes
-the configuration manager to start up. The total length of time Boss
-will wait for the configuration manager before reporting an error is
-set with the command line --wait switch, which has a default value of
-ten seconds.
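
The two failure cases described for BIND10_SEND_SIGNAL_FAIL above (a child
that has already exited versus a permission problem after dropping
privileges) map directly onto the ESRCH and EPERM errno values. A minimal,
illustrative Python sketch of that distinction, not taken from the BIND 10
sources, could look like this:

import errno
import os
import signal

def send_shutdown_signal(pid, forceful=False):
    # Hypothetical helper: send SIGKILL (forceful) or SIGTERM to pid and
    # report whether retrying makes sense, mirroring the message text above.
    sig = signal.SIGKILL if forceful else signal.SIGTERM
    try:
        os.kill(pid, sig)
        return True                   # delivered; keep waiting for the exit
    except OSError as ex:
        if ex.errno == errno.ESRCH:   # process already gone, safe to ignore
            return True
        if ex.errno == errno.EPERM:   # no privilege, retrying will not help
            return False
        raise
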
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
deleted file mode 100755
index 9f41804..0000000
--- a/src/bin/bind10/bind10_src.py.in
+++ /dev/null
@@ -1,1318 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
- SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
- PREFIX = "@prefix@"
- DATAROOTDIR = "@datarootdir@"
- SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-import copy
-
-from bind10_config import LIBEXECPATH
-import bind10_config
-import isc.cc
-import isc.util.process
-import isc.net.parse
-import isc.log
-from isc.log_messages.bind10_messages import *
-import isc.bind10.component
-import isc.bind10.special_component
-import isc.bind10.socket_cache
-import libutil_io_python
-import tempfile
-
-isc.log.init("b10-boss", buffer=True)
-logger = isc.log.Logger("boss")
-
-# Pending system-wide debug level definitions, the ones we
-# use here are hardcoded for now
-DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
-DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
-
-# Messages sent over the unix domain socket to indicate if it is followed by a real socket
-CREATOR_SOCKET_OK = b"1\n"
-CREATOR_SOCKET_UNAVAILABLE = b"0\n"
-
-# RCodes of known exceptions for the get_token command
-CREATOR_SOCKET_ERROR = 2
-CREATOR_SHARE_ERROR = 3
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for boot_time of Boss
-_BASETIME = time.gmtime()
-
-# Detailed error message commonly used on startup failure, possibly due to
-# permission issue regarding log lock file. We dump verbose message because
-# it may not be clear exactly what to do if it simply says
-# "failed to open <filename>: permission denied"
-NOTE_ON_LOCK_FILE = """\
-TIP: if this is about permission error for a lock file, check if the directory
-of the file is writable for the user of the bind10 process; often you need
-to start bind10 as a super user. Also, if you specify the -u option to
-change the user and group, the directory must be writable for the group,
-and the created lock file must be writable for that user. Finally, make sure
-the lock file is not left in the directory before restarting.
-"""
-
-class ProcessInfoError(Exception): pass
-
-class ChangeUserError(Exception):
- '''Exception raised when setuid/setgid fails.
-
- When raised, it's expected to be propagated via underlying component
- management modules to the top level so that it will help provide useful
- fatal error message.
-
- '''
- pass
-
-class ProcessInfo:
- """Information about a process"""
-
- dev_null = open(os.devnull, "w")
-
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.process = None
- self.pid = None
-
- def _preexec_work(self):
- """Function used before running a program that needs to run as a
- different user."""
- # First, put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everything down by
- # other means).
- os.setpgrp()
-
- def _spawn(self):
- if self.dev_null_stdout:
- spawn_stdout = self.dev_null
- else:
- spawn_stdout = None
- if self.dev_null_stderr:
- spawn_stderr = self.dev_null
- else:
- spawn_stderr = None
- # Environment variables for the child process will be a copy of those
- # of the boss process with any additional specific variables given
- # on construction (self.env).
- spawn_env = copy.deepcopy(os.environ)
- spawn_env.update(self.env)
- spawn_env['PATH'] = LIBEXECPATH + ':' + spawn_env['PATH']
- self.process = subprocess.Popen(self.args,
- stdin=subprocess.PIPE,
- stdout=spawn_stdout,
- stderr=spawn_stderr,
- close_fds=True,
- env=spawn_env,
- preexec_fn=self._preexec_work)
- self.pid = self.process.pid
-
- # spawn() and respawn() are the same for now, but in the future they
- # may have different functionality
- def spawn(self):
- self._spawn()
-
- def respawn(self):
- self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class ProcessStartError(Exception): pass
-
-class BoB:
- """Boss of BIND class."""
-
- def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, clear_config=False,
- verbose=False, nokill=False, setuid=None, setgid=None,
- username=None, cmdctl_port=None, wait_time=10):
- """
- Initialize the Boss of BIND. This is a singleton (only one can run).
-
- The msgq_socket_file specifies the UNIX domain socket file that the
- msgq process listens on. If verbose is True, then the boss reports
- what it is doing.
-
- Data path and config filename are passed through to config manager
- (if provided) and specify the config file to be used.
-
- The cmdctl_port is passed to cmdctl and specifies on which port it
- should listen.
-
- wait_time controls the amount of time (in seconds) that Boss waits
- for selected processes to initialize before continuing with the
- initialization. Currently this is only the configuration manager.
- """
- self.cc_session = None
- self.ccs = None
- self.curproc = None
- self.msgq_socket_file = msgq_socket_file
- self.component_config = {}
- # Some time in the future, a single component may have multiple
- # processes (like a pipeline-like component). If that happens, the
- # name "components" may be inappropriate, but as the code probably
- # isn't completely ready for it, we leave it as components for now.
- # We also want to support multiple instances of a single component.
- # Whether we'll have a single component with multiple identical
- # processes, or start multiple components with the same configuration
- # (we do the latter now, but it might change), is an open question.
- self.components = {}
- # Simple list of components that died and need to wait for a
- # restart. Components manage their own restart schedule now
- self.components_to_restart = []
- self.runnable = False
- self.__uid = setuid
- self.__gid = setgid
- self.username = username
- self.verbose = verbose
- self.nokill = nokill
- self.data_path = data_path
- self.config_filename = config_filename
- self.clear_config = clear_config
- self.cmdctl_port = cmdctl_port
- self.wait_time = wait_time
- self.msgq_timeout = 5
-
- # _run_under_unittests is only meant to be used when testing. It
- # bypasses execution of some code to help with testing.
- self._run_under_unittests = False
-
- self._component_configurator = isc.bind10.component.Configurator(self,
- isc.bind10.special_component.get_specials())
- # The priorities here make them start in the correct order. First
- # the socket creator (which must start while we still have root
- # privileges), then the message queue and after that the config
- # manager (which uses the message queue)
- self.__core_components = {
- 'sockcreator': {
- 'kind': 'core',
- 'special': 'sockcreator',
- 'priority': 200
- },
- 'msgq': {
- 'kind': 'core',
- 'special': 'msgq',
- 'priority': 199
- },
- 'cfgmgr': {
- 'kind': 'core',
- 'special': 'cfgmgr',
- 'priority': 198
- }
- }
- self.__started = False
- self.exitcode = 0
-
- # If -v was set, enable full debug logging.
- if self.verbose:
- logger.set_severity("DEBUG", 99)
- # This is set in init_socket_srv
- self._socket_path = None
- self._socket_cache = None
- self._tmpdir = None
- self._srv_socket = None
- self._unix_sockets = {}
-
- def __propagate_component_config(self, config):
- comps = dict(config)
- # Fill in the core components, so they stay alive
- for comp in self.__core_components:
- if comp in comps:
- raise Exception(comp + " is core component managed by " +
- "bind10 boss, do not set it")
- comps[comp] = self.__core_components[comp]
- # Update the configuration
- self._component_configurator.reconfigure(comps)
-
- def change_user(self):
- '''Change the user and group to those specified on construction.
-
- This method is expected to be called by a component on initial
- startup when the system is ready to switch the user and group
- (i.e., once all components that need the privilege of the original
- user have started).
- '''
- try:
- if self.__gid is not None:
- logger.info(BIND10_SETGID, self.__gid)
- posix.setgid(self.__gid)
- except Exception as ex:
- raise ChangeUserError('failed to change group: ' + str(ex))
-
- try:
- if self.__uid is not None:
- posix.setuid(self.__uid)
- # We use a one-shot logger after setuid here. This will
- # detect any permission issue regarding logging due to the
- # result of setuid at the earliest opportunity.
- isc.log.Logger("boss").info(BIND10_SETUID, self.__uid)
- except Exception as ex:
- raise ChangeUserError('failed to change user: ' + str(ex))
-
- def config_handler(self, new_config):
- # If this is the initial update, don't do anything now; leave it to startup
- if not self.runnable:
- return
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
- new_config)
- try:
- if 'components' in new_config:
- self.__propagate_component_config(new_config['components'])
- return isc.config.ccsession.create_answer(0)
- except Exception as e:
- return isc.config.ccsession.create_answer(1, str(e))
-
- def get_processes(self):
- pids = list(self.components.keys())
- pids.sort()
- process_list = [ ]
- for pid in pids:
- process_list.append([pid, self.components[pid].name(),
- self.components[pid].address()])
- return process_list
-
- def _get_stats_data(self):
- return { 'boot_time':
- time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
- }
-
- def command_handler(self, command, args):
- logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
- answer = isc.config.ccsession.create_answer(1, "command not implemented")
- if type(command) != str:
- answer = isc.config.ccsession.create_answer(1, "bad command")
- else:
- if command == "shutdown":
- self.runnable = False
- answer = isc.config.ccsession.create_answer(0)
- elif command == "getstats":
- answer = isc.config.ccsession.create_answer(
- 0, self._get_stats_data())
- elif command == "ping":
- answer = isc.config.ccsession.create_answer(0, "pong")
- elif command == "show_processes":
- answer = isc.config.ccsession. \
- create_answer(0, self.get_processes())
- elif command == "get_socket":
- answer = self._get_socket(args)
- elif command == "drop_socket":
- if "token" not in args:
- answer = isc.config.ccsession. \
- create_answer(1, "Missing token parameter")
- else:
- try:
- self._socket_cache.drop_socket(args["token"])
- answer = isc.config.ccsession.create_answer(0)
- except Exception as e:
- answer = isc.config.ccsession.create_answer(1, str(e))
- else:
- answer = isc.config.ccsession.create_answer(1,
- "Unknown command")
- return answer
-
- def kill_started_components(self):
- """
- Called as part of the exception handling when a process fails to
- start, this runs through the list of started processes, killing
- each one. It then clears that list.
- """
- logger.info(BIND10_KILLING_ALL_PROCESSES)
- self.__kill_children(True)
- self.components = {}
-
- def _read_bind10_config(self):
- """
- Reads the parameters associated with the BoB module itself.
-
- This means the list of components we should start now.
-
- This could easily be combined into start_all_processes, but
- it stays because of historical reasons and because the tests
- replace the method sometimes.
- """
- logger.info(BIND10_READING_BOSS_CONFIGURATION)
-
- config_data = self.ccs.get_full_config()
- self.__propagate_component_config(config_data['components'])
-
- def log_starting(self, process, port = None, address = None):
- """
- A convenience function to output a "Starting xxx" message if the
- logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
- Putting this into a separate method ensures
- that the output form is consistent across all processes.
-
- The process name (passed as the first argument) is put into
- self.curproc, and is used to indicate which process failed to
- start if there is an error (and is used in the "Started" message
- on success). The optional port and address information are
- appended to the message (if present).
- """
- self.curproc = process
- if port is None and address is None:
- logger.info(BIND10_STARTING_PROCESS, self.curproc)
- elif address is None:
- logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
- port)
- else:
- logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
- self.curproc, address, port)
-
- def log_started(self, pid = None):
- """
- A convenience function to output a 'Started xxxx (PID yyyy)'
- message. As with log_starting(), this ensures a consistent
- format.
- """
- if pid is None:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
- else:
- logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
-
- def process_running(self, msg, who):
- """
- Some processes return a message to the Boss after they have
- started to indicate that they are running. The form of the
- message is a dictionary with contents {"running": "<process>"}.
- This method checks the passed message and returns True if the
- "who" process is contained in the message (so is presumably
- running). It returns False for all other conditions and will
- log an error if appropriate.
- """
- if msg is not None:
- try:
- if msg["running"] == who:
- return True
- else:
- logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
- except:
- logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
-
- return False
-
- # The next few methods start the individual processes of BIND-10. They
- # are called via start_all_processes(). If any fail, an exception is
- # raised which is caught by the caller of start_all_processes(); this kills
- # processes started up to that point before terminating the program.
-
- def _make_process_info(self, name, args, env,
- dev_null_stdout=False, dev_null_stderr=False):
- """
- Wrapper around ProcessInfo(), useful to override
- ProcessInfo() creation during testing.
- """
- return ProcessInfo(name, args, env, dev_null_stdout, dev_null_stderr)
-
- def start_msgq(self):
- """
- Start the message queue and connect to the command channel.
- """
- self.log_starting("b10-msgq")
- msgq_proc = self._make_process_info("b10-msgq", ["b10-msgq"],
- self.c_channel_env,
- True, not self.verbose)
- msgq_proc.spawn()
- self.log_started(msgq_proc.pid)
-
- # Now connect to the c-channel
- cc_connect_start = time.time()
- while self.cc_session is None:
- # if we are run under unittests, break
- if self._run_under_unittests:
- break
-
- # if we have been trying for "a while" give up
- if (time.time() - cc_connect_start) > self.msgq_timeout:
- if msgq_proc.process:
- msgq_proc.process.kill()
- logger.error(BIND10_CONNECTING_TO_CC_FAIL)
- raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
- # try to connect, and if we can't wait a short while
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- except isc.cc.session.SessionError:
- time.sleep(0.1)
-
- # Subscribe to the message queue. The only messages we expect to receive
- # on this channel are ones relating to process startup.
- if self.cc_session is not None:
- self.cc_session.group_subscribe("Boss")
-
- return msgq_proc
-
- def start_cfgmgr(self):
- """
- Starts the configuration manager process
- """
- self.log_starting("b10-cfgmgr")
- args = ["b10-cfgmgr"]
- if self.data_path is not None:
- args.append("--data-path=" + self.data_path)
- if self.config_filename is not None:
- args.append("--config-filename=" + self.config_filename)
- if self.clear_config:
- args.append("--clear-config")
- bind_cfgd = self._make_process_info("b10-cfgmgr", args,
- self.c_channel_env)
- bind_cfgd.spawn()
- self.log_started(bind_cfgd.pid)
-
- # Wait for the configuration manager to start up as
- # subsequent initialization cannot proceed without it. The
- # time to wait can be set on the command line.
- time_remaining = self.wait_time
- msg, env = self.cc_session.group_recvmsg()
- while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
- logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
- time.sleep(1)
- time_remaining = time_remaining - 1
- msg, env = self.cc_session.group_recvmsg()
-
- if not self.process_running(msg, "ConfigManager"):
- raise ProcessStartError("Configuration manager process has not started")
-
- return bind_cfgd
-
- def start_ccsession(self, c_channel_env):
- """
- Start the CC Session
-
- The argument c_channel_env is unused but is supplied to keep the
- argument list the same for all start_xxx methods.
-
- With regards to logging, note that as the CC session is not a
- process, the log_starting/log_started methods are not used.
- """
- logger.info(BIND10_STARTING_CC)
- self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
- self.config_handler,
- self.command_handler,
- socket_file = self.msgq_socket_file)
- self.ccs.start()
- logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
-
- # A couple of utility methods for starting processes...
-
- def start_process(self, name, args, c_channel_env, port=None, address=None):
- """
- Given a set of command arguments, start the process and output
- appropriate log messages. If the start is successful, the process
- is added to the list of started processes.
-
- The port and address arguments are for log messages only.
- """
- self.log_starting(name, port, address)
- newproc = self._make_process_info(name, args, c_channel_env)
- newproc.spawn()
- self.log_started(newproc.pid)
- return newproc
-
- def register_process(self, pid, component):
- """
- Put another process into boss to watch over it. When the process
- dies, component.failed() is called with the exit code.
-
- It is expected that the component is an isc.bind10.component.BaseComponent
- subclass (or anything having the same interface).
- """
- self.components[pid] = component
-
- def start_simple(self, name):
- """
- Most of the BIND-10 processes are started with the command:
-
- <process-name> [-v]
-
- ... where -v is appended if verbose is enabled. This method
- generates the arguments from the name and starts the process.
- """
- # Set up the command arguments.
- args = [name]
- if self.verbose:
- args += ['-v']
-
- # ... and start the process
- return self.start_process(name, args, self.c_channel_env)
-
- # The next few methods start up the rest of the BIND-10 processes.
- # Although many of these methods are little more than a call to
- # start_simple, they are retained (a) for testing reasons and (b) as a place
- # where modifications can be made if the process start-up sequence changes
- # for a given process.
-
- def start_auth(self):
- """
- Start the Authoritative server
- """
- authargs = ['b10-auth']
- if self.verbose:
- authargs += ['-v']
-
- # ... and start
- return self.start_process("b10-auth", authargs, self.c_channel_env)
-
- def start_resolver(self):
- """
- Start the Resolver. At present, all these arguments and switches
- are pure speculation. As with the auth daemon, they should be
- read from the configuration database.
- """
- self.curproc = "b10-resolver"
- # XXX: this must be read from the configuration manager in the future
- resargs = ['b10-resolver']
- if self.verbose:
- resargs += ['-v']
-
- # ... and start
- return self.start_process("b10-resolver", resargs, self.c_channel_env)
-
- def start_cmdctl(self):
- """
- Starts the command control process
- """
- args = ["b10-cmdctl"]
- if self.cmdctl_port is not None:
- args.append("--port=" + str(self.cmdctl_port))
- if self.verbose:
- args.append("-v")
- return self.start_process("b10-cmdctl", args, self.c_channel_env,
- self.cmdctl_port)
-
- def start_all_components(self):
- """
- Starts up all the components. Any exception generated during the
- starting of the components is handled by the caller.
- """
- # Start the real core (sockcreator, msgq, cfgmgr)
- self._component_configurator.startup(self.__core_components)
-
- # Connect to the msgq. This is not a process, so it's not handled
- # inside the configurator.
- self.start_ccsession(self.c_channel_env)
-
- # Extract the parameters associated with Bob. This can only be
- # done after the CC Session is started. Note that the logging
- # configuration may override the "-v" switch set on the command line.
- self._read_bind10_config()
-
- # TODO: Return the dropping of privileges
-
- def startup(self):
- """
- Start the BoB instance.
-
- Returns None if successful, otherwise a string describing the
- problem.
- """
- # Try to connect to the c-channel daemon, to see if it is already
- # running
- c_channel_env = {}
- if self.msgq_socket_file is not None:
- c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
- logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
- try:
- self.cc_session = isc.cc.Session(self.msgq_socket_file)
- logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
- return "b10-msgq already running, or socket file not cleaned up, cannot start"
- except isc.cc.session.SessionError:
- # this is the case we want, where the msgq is not running
- pass
-
- # Start all components. If any one fails to start, kill all started
- # components and exit with an error indication.
- try:
- self.c_channel_env = c_channel_env
- self.start_all_components()
- except ChangeUserError as e:
- self.kill_started_components()
- return str(e) + '; ' + NOTE_ON_LOCK_FILE.replace('\n', ' ')
- except Exception as e:
- self.kill_started_components()
- return "Unable to start " + self.curproc + ": " + str(e)
-
- # Started successfully
- self.runnable = True
- self.__started = True
- return None
-
- def stop_process(self, process, recipient, pid):
- """
- Stop the given process in a friendly way. The process is the name it
- has (in logs, etc), the recipient is its address on msgq, and pid is
- the process ID (if we have multiple processes of the same name, the
- recipient can use it to decide whether the command is meant for it).
- """
- logger.info(BIND10_STOP_PROCESS, process)
- self.cc_session.group_sendmsg(isc.config.ccsession.
- create_command('shutdown', {'pid': pid}),
- recipient, recipient)
-
- def component_shutdown(self, exitcode=0):
- """
- Stop the Boss instance from a component's request. The exitcode
- indicates the desired exit code.
-
- If we did not start yet, it raises an exception, which is meant
- to propagate through the component and configurator to the startup
- routine and abort the startup immediately. If it is started up already,
- we just mark it so we terminate soon.
-
- It does set the exit code in both cases.
- """
- self.exitcode = exitcode
- if not self.__started:
- raise Exception("Component failed during startup")
- else:
- self.runnable = False
-
- def shutdown(self):
- """Stop the BoB instance."""
- logger.info(BIND10_SHUTDOWN)
- # If ccsession is still there, inform rest of the system this module
- # is stopping. Since everything will be stopped shortly, this is not
- # really necessary, but this is done to reflect that boss is also
- # 'just' a module.
- self.ccs.send_stopping()
-
- # try using the BIND 10 request to stop
- try:
- self._component_configurator.shutdown()
- except:
- pass
- # XXX: some delay probably useful... how much is uncertain
- # I have changed the delay from 0.5 to 1, but sometimes it's
- # still not enough.
- time.sleep(1)
- self.reap_children()
-
- # Send TERM and KILL signals to modules if we're not prevented
- # from doing so
- if not self.nokill:
- # next try sending a SIGTERM
- self.__kill_children(False)
- # finally, send SIGKILL (unmaskable termination) until everybody
- # dies
- while self.components:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
- self.__kill_children(True)
- logger.info(BIND10_SHUTDOWN_COMPLETE)
-
- def __kill_children(self, forceful):
- '''Terminate remaining subprocesses by sending a signal.
-
- The forceful parameter will be passed to Component.kill().
- This is a dedicated subroutine of shutdown(), just to unify two
- similar cases.
-
- '''
- logmsg = BIND10_SEND_SIGKILL if forceful else BIND10_SEND_SIGTERM
- # We need to make a copy of values as the components may be modified
- # in the loop.
- for component in list(self.components.values()):
- logger.info(logmsg, component.name(), component.pid())
- try:
- component.kill(forceful)
- except OSError as ex:
- # If kill() failed due to EPERM, it doesn't make sense to
- # keep trying, so we just log the fact and forget that
- # component. Ignore other OSErrors (usually ESRCH because
- # the child finally exited)
- signame = "SIGKILL" if forceful else "SIGTERM"
- logger.info(BIND10_SEND_SIGNAL_FAIL, signame,
- component.name(), component.pid(), ex)
- if ex.errno == errno.EPERM:
- del self.components[component.pid()]
-
- def _get_process_exit_status(self):
- return os.waitpid(-1, os.WNOHANG)
-
- def reap_children(self):
- """Check to see if any of our child processes have exited,
- and note this for later handling.
- """
- while True:
- try:
- (pid, exit_status) = self._get_process_exit_status()
- except OSError as o:
- if o.errno == errno.ECHILD:
- break
- # XXX: should be impossible to get any other error here
- raise
- if pid == 0:
- break
- if pid in self.components:
- # One of the components we know about. Get information on it.
- component = self.components.pop(pid)
- logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
- exit_status)
- if component.is_running() and self.runnable:
- # Tell it it failed. But only if it matters (we are
- # not shutting down and the component considers itself
- # to be running).
- component_restarted = component.failed(exit_status)
- # if the process wants to be restarted, but not just yet,
- # it returns False
- if not component_restarted:
- self.components_to_restart.append(component)
- else:
- logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
-
- def restart_processes(self):
- """
- Restart any dead processes:
-
- * Returns the time when the next process is ready to be restarted.
- * If the server is shutting down, returns 0.
- * If there are no processes, returns None.
-
- The values returned can be safely passed into select() as the
- timeout value.
-
- """
- if not self.runnable:
- return 0
- still_dead = []
- # keep track of the first time we need to check this queue again,
- # if at all
- next_restart_time = None
- now = time.time()
- for component in self.components_to_restart:
- # If the component was removed from the configurator since it was
- # scheduled to restart, just ignore it. The object will just be
- # dropped here.
- if not self._component_configurator.has_component(component):
- logger.info(BIND10_RESTART_COMPONENT_SKIPPED, component.name())
- elif not component.restart(now):
- still_dead.append(component)
- if next_restart_time is None or\
- next_restart_time > component.get_restart_time():
- next_restart_time = component.get_restart_time()
- self.components_to_restart = still_dead
-
- return next_restart_time
-
- def _get_socket(self, args):
- """
- Implementation of the get_socket CC command. It asks the cache
- to provide the token and sends the information back.
- """
- try:
- try:
- addr = isc.net.parse.addr_parse(args['address'])
- port = isc.net.parse.port_parse(args['port'])
- protocol = args['protocol']
- if protocol not in ['UDP', 'TCP']:
- raise ValueError("Protocol must be either UDP or TCP")
- share_mode = args['share_mode']
- if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
- raise ValueError("Share mode must be one of ANY, SAMEAPP" +
- " or NO")
- share_name = args['share_name']
- except KeyError as ke:
- return \
- isc.config.ccsession.create_answer(1,
- "Missing parameter " +
- str(ke))
-
- # FIXME: This call contains blocking IPC. It is expected to be
- # short, but if it turns out to be problem, we'll need to do
- # something about it.
- token = self._socket_cache.get_token(protocol, addr, port,
- share_mode, share_name)
- return isc.config.ccsession.create_answer(0, {
- 'token': token,
- 'path': self._socket_path
- })
- except isc.bind10.socket_cache.SocketError as e:
- return isc.config.ccsession.create_answer(CREATOR_SOCKET_ERROR,
- str(e))
- except isc.bind10.socket_cache.ShareError as e:
- return isc.config.ccsession.create_answer(CREATOR_SHARE_ERROR,
- str(e))
- except Exception as e:
- return isc.config.ccsession.create_answer(1, str(e))
-
- def socket_request_handler(self, token, unix_socket):
- """
- This function handles a token that comes over a unix_domain socket.
- The function looks into the _socket_cache and sends the socket
- identified by the token back over the unix_socket.
- """
- try:
- token = str(token, 'ASCII') # Convert from bytes to str
- fd = self._socket_cache.get_socket(token, unix_socket.fileno())
- # FIXME: These two calls are blocking in their nature. An OS-level
- # buffer is likely to be large enough to hold all these data, but
- # if it wasn't and the remote application got stuck, we would have
- # a problem. If there appear such problems, we should do something
- # about it.
- unix_socket.sendall(CREATOR_SOCKET_OK)
- libutil_io_python.send_fd(unix_socket.fileno(), fd)
- except Exception as e:
- logger.info(BIND10_NO_SOCKET, token, e)
- unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
-
- def socket_consumer_dead(self, unix_socket):
- """
- This function handles the case when a unix_socket closes. This means
- all sockets sent to it are to be considered closed. This function
- signals that to the _socket_cache.
- """
- logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
- try:
- self._socket_cache.drop_application(unix_socket.fileno())
- except ValueError:
- # This means the application holds no sockets. It's harmless, as it
- # can happen in real life - for example, it requests a socket, but
- # get_socket doesn't find it, so the application dies. It should be
- # rare, though.
- pass
-
- def set_creator(self, creator):
- """
- Registers a socket creator into the boss. The socket creator is not
- used directly, but through a cache. The cache is created in this
- method.
-
- If called more than once, it raises a ValueError.
- """
- if self._socket_cache is not None:
- raise ValueError("A creator was inserted previously")
- self._socket_cache = isc.bind10.socket_cache.Cache(creator)
-
- def init_socket_srv(self):
- """
- Creates and listens on a unix-domain socket to be able to send out
- the sockets.
-
- This method should be called after switching user, or the switched
- applications won't be able to access the socket.
- """
- self._srv_socket = socket.socket(socket.AF_UNIX)
- # We create a temporary directory somewhere safe and unique, to avoid
- # the need to find the place ourselves or bother users. Also, this
- # secures the socket on some platforms, as it creates a private
- # directory.
- self._tmpdir = tempfile.mkdtemp(prefix='sockcreator-')
- # Get the name
- self._socket_path = os.path.join(self._tmpdir, "sockcreator")
- # And bind the socket to the name
- self._srv_socket.bind(self._socket_path)
- self._srv_socket.listen(5)
-
- def remove_socket_srv(self):
- """
- Closes and removes the listening socket and the directory where it
- lives, as we created both.
-
- It does nothing if the _srv_socket is not set (eg. it was not yet
- initialized).
- """
- if self._srv_socket is not None:
- self._srv_socket.close()
- if os.path.exists(self._socket_path):
- os.remove(self._socket_path)
- if os.path.isdir(self._tmpdir):
- os.rmdir(self._tmpdir)
-
- def _srv_accept(self):
- """
- Accept a connection on the unix domain socket server and add it to
- the sockets we care about.
- """
- (sock, addr) = self._srv_socket.accept()
- self._unix_sockets[sock.fileno()] = (sock, b'')
-
- def _socket_data(self, socket_fileno):
- """
- This is called when a socket identified by the socket_fileno needs
- attention. We try to read data from there. If it is closed, we remove
- it.
- """
- (sock, previous) = self._unix_sockets[socket_fileno]
- while True:
- try:
- data = sock.recv(1, socket.MSG_DONTWAIT)
- except socket.error as se:
- # These two might be different on some systems
- if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
- # No more data now. Oh, well, just store what we have.
- self._unix_sockets[socket_fileno] = (sock, previous)
- return
- else:
- data = b'' # Pretend it got closed
- if len(data) == 0: # The socket got to its end
- del self._unix_sockets[socket_fileno]
- self.socket_consumer_dead(sock)
- sock.close()
- return
- else:
- if data == b"\n":
- # Handle this token and clear it
- self.socket_request_handler(previous, sock)
- previous = b''
- else:
- previous += data
-
- def run(self, wakeup_fd):
- """
- The main loop, waiting for sockets, commands and dead processes.
- Runs as long as the runnable is true.
-
- The wakeup_fd descriptor is the read end of pipe where CHLD signal
- handler writes.
- """
- ccs_fd = self.ccs.get_socket().fileno()
- while self.runnable:
- # clean up any processes that exited
- self.reap_children()
- next_restart = self.restart_processes()
- if next_restart is None:
- wait_time = None
- else:
- wait_time = max(next_restart - time.time(), 0)
-
- # select() can raise EINTR when a signal arrives,
- # even if they are resumable, so we have to catch
- # the exception
- try:
- (rlist, wlist, xlist) = \
- select.select([wakeup_fd, ccs_fd,
- self._srv_socket.fileno()] +
- list(self._unix_sockets.keys()), [], [],
- wait_time)
- except select.error as err:
- if err.args[0] == errno.EINTR:
- (rlist, wlist, xlist) = ([], [], [])
- else:
- logger.fatal(BIND10_SELECT_ERROR, err)
- break
-
- for fd in rlist + xlist:
- if fd == ccs_fd:
- try:
- self.ccs.check_command()
- except isc.cc.session.ProtocolError:
- logger.fatal(BIND10_MSGQ_DISAPPEARED)
- self.runnable = False
- break
- elif fd == wakeup_fd:
- os.read(wakeup_fd, 32)
- elif fd == self._srv_socket.fileno():
- self._srv_accept()
- elif fd in self._unix_sockets:
- self._socket_data(fd)
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
- """A child process has died (SIGCHLD received)."""
- # don't do anything...
- # the Python signal handler has been set up to write
- # down a pipe, waking up our select() bit
- pass
-
-def get_signame(signal_number):
- """Return the symbolic name for a signal."""
- for sig in dir(signal):
- if sig.startswith("SIG") and sig[3].isalnum():
- if getattr(signal, sig) == signal_number:
- return sig
- return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
- """We need to exit (SIGINT or SIGTERM received)."""
- global options
- global boss_of_bind
- logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
- """Function that renames the process if it is requested by a option."""
- isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
- """
- Function for parsing command line arguments. Returns the
- options object from OptionParser.
- """
- parser = Parser(version=VERSION)
- parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
- type="string", default=None,
- help="UNIX domain socket file the b10-msgq daemon will use")
- parser.add_option("-i", "--no-kill", action="store_true", dest="nokill",
- default=False, help="do not send SIGTERM and SIGKILL signals to modules during shutdown")
- parser.add_option("-u", "--user", dest="user", type="string", default=None,
- help="Change user after startup (must run as root)")
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- help="display more about what is going on")
- parser.add_option("--pretty-name", type="string", action="callback",
- callback=process_rename,
- help="Set the process name (displayed in ps, top, ...)")
- parser.add_option("-c", "--config-file", action="store",
- dest="config_file", default=None,
- help="Configuration database filename")
- parser.add_option("--clear-config", action="store_true",
- dest="clear_config", default=False,
- help="Create backup of the configuration file and " +
- "start with a clean configuration")
- parser.add_option("-p", "--data-path", dest="data_path",
- help="Directory to search for configuration files",
- default=None)
- parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
- default=None, help="Port of command control")
- parser.add_option("--pid-file", dest="pid_file", type="string",
- default=None,
- help="file to dump the PID of the BIND 10 process")
- parser.add_option("-w", "--wait", dest="wait_time", type="int",
- default=10, help="Time (in seconds) to wait for config manager to start up")
-
- (options, args) = parser.parse_args(args)
-
- if options.cmdctl_port is not None:
- try:
- isc.net.parse.port_parse(options.cmdctl_port)
- except ValueError as e:
- parser.error(e)
-
- if args:
- parser.print_help()
- sys.exit(1)
-
- return options
-
-def dump_pid(pid_file):
- """
- Dump the PID of the current process to the specified file. If the given
- file is None this function does nothing. If the file already exists,
- the existing content will be removed. If a system error happens in
- creating or writing to the file, the corresponding exception will be
- propagated to the caller.
- """
- if pid_file is None:
- return
- f = open(pid_file, "w")
- f.write('%d\n' % os.getpid())
- f.close()
-
-def unlink_pid_file(pid_file):
- """
- Remove the given file, which is basically expected to be the PID file
- created by dump_pid(). The specified file may or may not exist; if it
- doesn't, this function does nothing. Other system level errors in removing
- the file will be propagated as the corresponding exception.
- """
- if pid_file is None:
- return
- try:
- os.unlink(pid_file)
- except OSError as error:
- if error.errno != errno.ENOENT:
- raise
-
-def remove_lock_files():
- """
- Remove various lock files which were created by code such as the
- logger. This function should be called after BIND 10 shutdown.
- """
-
- lockfiles = ["logger_lockfile"]
-
- lpath = bind10_config.DATA_PATH
- if "B10_FROM_BUILD" in os.environ:
- lpath = os.environ["B10_FROM_BUILD"]
- if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
- lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
- if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ:
- lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"]
-
- for f in lockfiles:
- fname = lpath + '/' + f
- if os.path.isfile(fname):
- try:
- os.unlink(fname)
- except OSError as e:
- # We catch and ignore permission-related errors on unlink.
- # This can happen if bind10 started with -u, created a lock
- # file as a privileged user, but the directory is not writable
- # for the changed user. This setup will cause an immediate
- # start failure, and we leave a verbose error message
- # mentioning the leftover lock file, so it should be
- # acceptable to ignore it (note that it doesn't make sense
- # to log this event at this point).
- if e.errno != errno.EPERM and e.errno != errno.EACCES:
- raise
-
- return
-
-def main():
- global options
- global boss_of_bind
- # Enforce line buffering on stdout, even when not a TTY
- sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
- options = parse_args()
-
- # Announce startup. This should be the first log message.
- try:
- logger.info(BIND10_STARTING, VERSION)
- except RuntimeError as e:
- sys.stderr.write('ERROR: failed to write the initial log: %s\n' %
- str(e))
- sys.stderr.write(NOTE_ON_LOCK_FILE)
- sys.exit(1)
-
- # Check user ID.
- setuid = None
- setgid = None
- username = None
- if options.user:
- # Try getting information about the user, assuming UID passed.
- try:
- pw_ent = pwd.getpwuid(int(options.user))
- setuid = pw_ent.pw_uid
- setgid = pw_ent.pw_gid
- username = pw_ent.pw_name
- except ValueError:
- pass
- except KeyError:
- pass
-
- # Next try getting information about the user, assuming user name
- # passed.
- # If the information is both a valid user name and user number, we
- # prefer the name because we try it second. A minor point, hopefully.
- try:
- pw_ent = pwd.getpwnam(options.user)
- setuid = pw_ent.pw_uid
- setgid = pw_ent.pw_gid
- username = pw_ent.pw_name
- except KeyError:
- pass
-
- if setuid is None:
- logger.fatal(BIND10_INVALID_USER, options.user)
- sys.exit(1)
-
- # Create wakeup pipe for signal handlers
- wakeup_pipe = os.pipe()
- signal.set_wakeup_fd(wakeup_pipe[1])
-
- # Set signal handlers for catching child termination, as well
- # as our own demise.
- signal.signal(signal.SIGCHLD, reaper)
- signal.siginterrupt(signal.SIGCHLD, False)
- signal.signal(signal.SIGINT, fatal_signal)
- signal.signal(signal.SIGTERM, fatal_signal)
-
- # Block SIGPIPE, as we don't want it to end this process
- signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
- try:
- # Go bob!
- boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.clear_config,
- options.verbose, options.nokill,
- setuid, setgid, username, options.cmdctl_port,
- options.wait_time)
- startup_result = boss_of_bind.startup()
- if startup_result:
- logger.fatal(BIND10_STARTUP_ERROR, startup_result)
- sys.exit(1)
- boss_of_bind.init_socket_srv()
- logger.info(BIND10_STARTUP_COMPLETE)
- dump_pid(options.pid_file)
-
- # Let it run
- boss_of_bind.run(wakeup_pipe[0])
-
- # shutdown
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- boss_of_bind.shutdown()
- finally:
- # Clean up the filesystem
- unlink_pid_file(options.pid_file)
- remove_lock_files()
- if boss_of_bind is not None:
- boss_of_bind.remove_socket_srv()
- sys.exit(boss_of_bind.exitcode)
-
-if __name__ == "__main__":
- main()
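
The main loop removed above avoids doing real work in the SIGCHLD handler:
reaper() is a no-op, and signal.set_wakeup_fd() arranges for the signal to
write into a pipe whose read end is watched by select(), which then triggers
reap_children(). A stripped-down sketch of that wake-up mechanism, written
independently of the BoB class (and using os.set_blocking(), which newer
Python versions require for the wakeup fd), is:

import os
import select
import signal

def sigchld_handler(signum, frame):
    # Intentionally empty: set_wakeup_fd() already wakes the select() loop.
    pass

read_fd, write_fd = os.pipe()
os.set_blocking(write_fd, False)          # wakeup fd must be non-blocking
signal.set_wakeup_fd(write_fd)
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.siginterrupt(signal.SIGCHLD, False)

def wait_once(other_fds, timeout):
    # Wait for either a wakeup byte (a child exited) or activity on other_fds.
    rlist, _, _ = select.select([read_fd] + list(other_fds), [], [], timeout)
    if read_fd in rlist:
        os.read(read_fd, 32)              # drain the wakeup bytes
        # a real loop would call reap_children() here
    return rlist
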
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
deleted file mode 100644
index 442d46f..0000000
--- a/src/bin/bind10/bob.spec
+++ /dev/null
@@ -1,92 +0,0 @@
-{
- "module_spec": {
- "module_name": "Boss",
- "module_description": "Master process",
- "config_data": [
- {
- "item_name": "components",
- "item_type": "named_set",
- "item_optional": false,
- "item_default": {
- "b10-stats": { "address": "Stats", "kind": "dispensable" },
- "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
- },
- "named_set_item_spec": {
- "item_name": "component",
- "item_type": "map",
- "item_optional": false,
- "item_default": { },
- "map_item_spec": [
- {
- "item_name": "special",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "process",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "kind",
- "item_optional": false,
- "item_type": "string",
- "item_default": "dispensable"
- },
- {
- "item_name": "address",
- "item_optional": true,
- "item_type": "string"
- },
- {
- "item_name": "params",
- "item_optional": true,
- "item_type": "list",
- "list_item_spec": {
- "item_name": "param",
- "item_optional": false,
- "item_type": "string",
- "item_default": ""
- }
- },
- {
- "item_name": "priority",
- "item_optional": true,
- "item_type": "integer"
- }
- ]
- }
- }
- ],
- "commands": [
- {
- "command_name": "shutdown",
- "command_description": "Shut down BIND 10",
- "command_args": []
- },
- {
- "command_name": "ping",
- "command_description": "Ping the boss process",
- "command_args": []
- },
- {
- "command_name": "show_processes",
- "command_description": "List the running BIND 10 processes",
- "command_args": []
- }
- ],
- "statistics": [
- {
- "item_name": "boot_time",
- "item_type": "string",
- "item_optional": false,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Boot time",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- }
- ]
- }
-}
-
-
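
The bob.spec removed above defines a "components" named set; each entry
follows the map_item_spec given there (special, process, kind, address,
params, priority). Purely as an illustration of the shape the Boss
config_handler() would receive under its "components" key, a hypothetical
configuration adding one extra component could look like the following
Python dict (the b10-auth entry and its values are invented for the example,
not defaults from the spec):

# Hypothetical value for the "components" named set, matching bob.spec above.
components_config = {
    "b10-cmdctl": {"special": "cmdctl", "kind": "needed"},
    "b10-stats": {"address": "Stats", "kind": "dispensable"},
    "b10-auth": {                      # illustrative extra entry
        "process": "b10-auth",
        "kind": "needed",
        "address": "Auth",
        "params": ["-v"],
        "priority": 10,
    },
}
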
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
index c23d907..d3e67f4 100644
--- a/src/bin/bind10/creatorapi.txt
+++ b/src/bin/bind10/creatorapi.txt
@@ -1,7 +1,7 @@
Socket creator API
==================
-This API is between Boss and other modules to allow them requesting of sockets.
+This API is between Init and other modules, allowing them to request sockets.
For simplicity, we will use the socket creator for all (even non-privileged)
ports for now, but we should have some function where we can abstract it later.
@@ -25,12 +25,12 @@ It seems we are stuck with current msgq for a while and there's a chance the
new replacement will not be able to send sockets inbound. So, we need another
channel.
-The boss will create a unix-domain socket and listen on it. When something
+b10-init will create a unix-domain socket and listen on it. When something
requests a socket over the command channel and the socket is created, some kind
of token is returned to the application (which will represent the future
socket). The application then connects to the unix-domain socket, sends the
-token over the connection (so Boss will know which socket to send there, in case
-multiple applications ask for sockets simultaneously) and Boss sends the socket
+token over the connection (so Init will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Init sends the socket
in return.
In theory, we could send the requests directly over the unix-domain
@@ -48,8 +48,8 @@ socket, but it has two disadvantages:
Caching of sockets
------------------
-To allow sending the same socket to multiple application, the Boss process will
-hold a cache. Each socket that is created and sent is kept open in Boss and
+To allow sending the same socket to multiple applications, the Init process will
+hold a cache. Each socket that is created and sent is kept open in Init and
preserved there as well. A reference count is kept with each of them.
When another application asks for the same socket, it is simply sent from the
@@ -60,14 +60,14 @@ command channel), the reference count can be decreased without problems. But
when the application terminates or crashes, we need to decrease it as well.
There's a problem, since we don't know which command channel connection (eg.
lname) belongs to which PID. Furthermore, the applications don't need to be
-started by boss.
+started by b10-init.
There are two possibilities:
* Let the msgq send messages about disconnected clients (eg. group message to
some name). This one is better if we want to migrate to dbus, since dbus
already has this capability as well as sending the sockets inbound (at least it
seems so on unix) and we could get rid of the unix-domain socket completely.
-* Keep the unix-domain connections open forever. Boss can remember which socket
+* Keep the unix-domain connections open forever. Init can remember which socket
was sent to which connection and when the connection closes (because the
application crashed), it can drop all the references on the sockets. This
seems easier to implement.
@@ -75,12 +75,12 @@ There are two possibilities:
The commands
------------
* Command to release a socket. This one would have single parameter, the token
- used to get the socket. After this, boss would decrease its reference count
- and if it drops to zero, close its own copy of the socket. This should be used
- when the module stops using the socket (and after closes it). The
- library could remember the file-descriptor to token mapping (for
- common applications that don't request the same socket multiple
- times in parallel).
+ used to get the socket. After this, b10-init would decrease its reference
+ count and if it drops to zero, close its own copy of the socket. This
+ should be used when the module stops using the socket (and after closes
+ it). The library could remember the file-descriptor to token mapping (for
+ common applications that don't request the same socket multiple times in
+ parallel).
* Command to request a socket. It would have parameters to specify which socket
(IP address, address family, port) and how to allow sharing. Sharing would be
one of:
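
Tying the token exchange described above to the code: the removed
socket_request_handler() expects the token terminated by a newline and
answers with "1\n" (followed by the descriptor as SCM_RIGHTS ancillary data)
or "0\n" if the token is unknown. A rough sketch of the client side, assuming
Python 3.9+ for socket.recv_fds() and not the actual BIND 10 client library,
could be:

import socket

def fetch_socket(socket_path, token):
    # Hypothetical helper: present a previously obtained token and receive
    # the corresponding file descriptor from b10-init (sketch only).
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as conn:
        conn.connect(socket_path)
        conn.sendall(token.encode('ascii') + b"\n")
        status = conn.recv(2)              # b"1\n" = ok, b"0\n" = unavailable
        if status != b"1\n":
            raise RuntimeError("no socket available for token " + token)
        _, fds, _, _ = socket.recv_fds(conn, 1024, 1)
        return fds[0]
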
diff --git a/src/bin/bind10/init.py.in b/src/bin/bind10/init.py.in
new file mode 100755
index 0000000..f47de31
--- /dev/null
+++ b/src/bin/bind10/init.py.in
@@ -0,0 +1,1321 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the b10-init program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the Init class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+ SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
+ "/src/bin/bind10/init.spec"
+else:
+ PREFIX = "@prefix@"
+ DATAROOTDIR = "@datarootdir@"
+ SPECFILE_LOCATION = "@datadir@/@PACKAGE@/init.spec"\
+ .replace("${datarootdir}", DATAROOTDIR)\
+ .replace("${prefix}", PREFIX)
+
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+import copy
+
+from bind10_config import LIBEXECPATH
+import bind10_config
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+import isc.config
+from isc.log_messages.init_messages import *
+import isc.bind10.component
+import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
+
+isc.log.init("b10-init", buffer=True)
+logger = isc.log.Logger("init")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
+# Messages sent over the unix domain socket to indicate whether a real socket follows
+CREATOR_SOCKET_OK = b"1\n"
+CREATOR_SOCKET_UNAVAILABLE = b"0\n"
+
+# RCodes of known exceptions for the get_token command
+CREATOR_SOCKET_ERROR = 2
+CREATOR_SHARE_ERROR = 3
+
+# Assign this process some longer name
+isc.util.process.rename()
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for boot_time of Init
+_BASETIME = time.gmtime()
+
+# Detailed error message commonly used on startup failure, possibly due to
+# permission issue regarding the log lock file. We dump a verbose message because
+# it may not be clear exactly what to do if it simply says
+# "failed to open <filename>: permission denied"
+NOTE_ON_LOCK_FILE = """\
+TIP: if this is about permission error for a lock file, check if the directory
+of the file is writable for the user of the bind10 process; often you need
+to start bind10 as a super user. Also, if you specify the -u option to
+change the user and group, the directory must be writable for the group,
+and the created lock file must be writable for that user. Finally, make sure
+the lock file is not left in the directory before restarting.
+"""
+
+class ProcessInfoError(Exception): pass
+
+class ChangeUserError(Exception):
+ '''Exception raised when setuid/setgid fails.
+
+ When raised, it's expected to be propagated via underlying component
+ management modules to the top level so that it will help provide a useful
+ fatal error message.
+
+ '''
+ pass
+
+class ProcessInfo:
+ """Information about a process"""
+
+ dev_null = open(os.devnull, "w")
+
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.process = None
+ self.pid = None
+
+ def _preexec_work(self):
+ """Function used before running a program that needs to run as a
+ different user."""
+ # First, put us into a separate process group so we don't get
+ # SIGINT signals on Ctrl-C (b10-init will shut everything down by
+ # other means).
+ os.setpgrp()
+
+ def _spawn(self):
+ if self.dev_null_stdout:
+ spawn_stdout = self.dev_null
+ else:
+ spawn_stdout = None
+ if self.dev_null_stderr:
+ spawn_stderr = self.dev_null
+ else:
+ spawn_stderr = None
+ # Environment variables for the child process will be a copy of those
+ # of the b10-init process with any additional specific variables given
+ # on construction (self.env).
+ spawn_env = copy.deepcopy(os.environ)
+ spawn_env.update(self.env)
+ spawn_env['PATH'] = LIBEXECPATH + ':' + spawn_env['PATH']
+ self.process = subprocess.Popen(self.args,
+ stdin=subprocess.PIPE,
+ stdout=spawn_stdout,
+ stderr=spawn_stderr,
+ close_fds=True,
+ env=spawn_env,
+ preexec_fn=self._preexec_work)
+ self.pid = self.process.pid
+
+ # spawn() and respawn() are the same for now, but in the future they
+ # may have different functionality
+ def spawn(self):
+ self._spawn()
+
+ def respawn(self):
+ self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class ProcessStartError(Exception): pass
+
+class Init:
+ """Init of BIND class."""
+
+ def __init__(self, msgq_socket_file=None, data_path=None,
+ config_filename=None, clear_config=False,
+ verbose=False, nokill=False, setuid=None, setgid=None,
+ username=None, cmdctl_port=None, wait_time=10):
+ """
+ Initialize the Init of BIND. This is a singleton (only one can run).
+
+ The msgq_socket_file specifies the UNIX domain socket file that the
+ msgq process listens on. If verbose is True, then b10-init reports
+ what it is doing.
+
+ Data path and config filename are passed through to config manager
+ (if provided) and specify the config file to be used.
+
+ The cmdctl_port is passed to cmdctl and specifies on which port it
+ should listen.
+
+ wait_time controls the amount of time (in seconds) that Init waits
+ for selected processes to initialize before continuing with the
+ initialization. Currently this is only the configuration manager.
+ """
+ self.cc_session = None
+ self.ccs = None
+ self.curproc = None
+ self.msgq_socket_file = msgq_socket_file
+ self.component_config = {}
+ # Some time in the future, it may happen that a single component has
+ # multiple processes (like a pipeline-like component). If that happens,
+ # the name "components" may be inappropriate. But as the code probably
+ # isn't completely ready for it, we leave it at components for now. We
+ # also want to support multiple instances of a single component. Whether
+ # we'll then have a single component with multiple identical processes,
+ # or start multiple components with the same configuration (as we do
+ # now, though it might change), is an open question.
+ self.components = {}
+ # Simply list of components that died and need to wait for a
+ # restart. Components manage their own restart schedule now
+ self.components_to_restart = []
+ self.runnable = False
+ self.__uid = setuid
+ self.__gid = setgid
+ self.username = username
+ self.verbose = verbose
+ self.nokill = nokill
+ self.data_path = data_path
+ self.config_filename = config_filename
+ self.clear_config = clear_config
+ self.cmdctl_port = cmdctl_port
+ self.wait_time = wait_time
+ self.msgq_timeout = 5
+
+ # _run_under_unittests is only meant to be used when testing. It
+ # bypasses execution of some code to help with testing.
+ self._run_under_unittests = False
+
+ self._component_configurator = isc.bind10.component.Configurator(self,
+ isc.bind10.special_component.get_specials())
+ # The priorities here make them start in the correct order. First
+ # the socket creator (which would drop root privileges by then),
+ # then the message queue and after that the config manager (which
+ # uses the message queue)
+ self.__core_components = {
+ 'sockcreator': {
+ 'kind': 'core',
+ 'special': 'sockcreator',
+ 'priority': 200
+ },
+ 'msgq': {
+ 'kind': 'core',
+ 'special': 'msgq',
+ 'priority': 199
+ },
+ 'cfgmgr': {
+ 'kind': 'core',
+ 'special': 'cfgmgr',
+ 'priority': 198
+ }
+ }
+ self.__started = False
+ self.exitcode = 0
+
+ # If -v was set, enable full debug logging.
+ if self.verbose:
+ logger.set_severity("DEBUG", 99)
+ # This is set in init_socket_srv
+ self._socket_path = None
+ self._socket_cache = None
+ self._tmpdir = None
+ self._srv_socket = None
+ self._unix_sockets = {}
+
+ def __propagate_component_config(self, config):
+ comps = dict(config)
+ # Fill in the core components, so they stay alive
+ for comp in self.__core_components:
+ if comp in comps:
+ raise Exception(comp + " is a core component managed by " +
+ "b10-init, do not set it")
+ comps[comp] = self.__core_components[comp]
+ # Update the configuration
+ self._component_configurator.reconfigure(comps)
+
+ def change_user(self):
+ '''Change the user and group to those specified on construction.
+
+ This method is expected to be called by a component on initial
+ startup when the system is ready to switch the user and group
+ (i.e., once all components that need the privilege of the original
+ user have started).
+ '''
+ try:
+ if self.__gid is not None:
+ logger.info(BIND10_SETGID, self.__gid)
+ posix.setgid(self.__gid)
+ except Exception as ex:
+ raise ChangeUserError('failed to change group: ' + str(ex))
+
+ try:
+ if self.__uid is not None:
+ posix.setuid(self.__uid)
+ # We use one-shot logger after setuid here. This will
+ # detect any permission issue regarding logging due to the
+ # result of setuid at the earliest opportunity.
+ isc.log.Logger("b10-init").info(BIND10_SETUID, self.__uid)
+ except Exception as ex:
+ raise ChangeUserError('failed to change user: ' + str(ex))
+
+ def config_handler(self, new_config):
+ # If this is initial update, don't do anything now, leave it to startup
+ if not self.runnable:
+ return
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+ new_config)
+ try:
+ if 'components' in new_config:
+ self.__propagate_component_config(new_config['components'])
+ return isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def get_processes(self):
+ pids = list(self.components.keys())
+ pids.sort()
+ process_list = [ ]
+ for pid in pids:
+ process_list.append([pid, self.components[pid].name(),
+ self.components[pid].address()])
+ return process_list
+
+ def _get_stats_data(self):
+ return { 'boot_time':
+ time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+ }
+
+ def command_handler(self, command, args):
+ logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+ answer = isc.config.ccsession.create_answer(1, "command not implemented")
+ if type(command) != str:
+ answer = isc.config.ccsession.create_answer(1, "bad command")
+ else:
+ if command == "shutdown":
+ self.runnable = False
+ answer = isc.config.ccsession.create_answer(0)
+ elif command == "getstats":
+ answer = isc.config.ccsession.create_answer(
+ 0, self._get_stats_data())
+ elif command == "ping":
+ answer = isc.config.ccsession.create_answer(0, "pong")
+ elif command == "show_processes":
+ answer = isc.config.ccsession. \
+ create_answer(0, self.get_processes())
+ elif command == "get_socket":
+ answer = self._get_socket(args)
+ elif command == "drop_socket":
+ if "token" not in args:
+ answer = isc.config.ccsession. \
+ create_answer(1, "Missing token parameter")
+ else:
+ try:
+ self._socket_cache.drop_socket(args["token"])
+ answer = isc.config.ccsession.create_answer(0)
+ except Exception as e:
+ answer = isc.config.ccsession.create_answer(1, str(e))
+ else:
+ answer = isc.config.ccsession.create_answer(1,
+ "Unknown command")
+ return answer
+
+ def kill_started_components(self):
+ """
+ Called as part of the exception handling when a process fails to
+ start, this runs through the list of started processes, killing
+ each one. It then clears that list.
+ """
+ logger.info(BIND10_KILLING_ALL_PROCESSES)
+ self.__kill_children(True)
+ self.components = {}
+
+ def _read_bind10_config(self):
+ """
+ Reads the parameters associated with the Init module itself.
+
+ This means the list of components we should start now.
+
+ This could easily be combined into start_all_processes, but
+ it stays because of historical reasons and because the tests
+ replace the method sometimes.
+ """
+ logger.info(BIND10_READING_INIT_CONFIGURATION)
+
+ config_data = self.ccs.get_full_config()
+ self.__propagate_component_config(config_data['components'])
+
+ def log_starting(self, process, port = None, address = None):
+ """
+ A convenience function to output a "Starting xxx" message.
+ Putting this into a separate method ensures
+ that the output form is consistent across all processes.
+
+ The process name (passed as the first argument) is put into
+ self.curproc, and is used to indicate which process failed to
+ start if there is an error (and is used in the "Started" message
+ on success). The optional port and address information are
+ appended to the message (if present).
+ """
+ self.curproc = process
+ if port is None and address is None:
+ logger.info(BIND10_STARTING_PROCESS, self.curproc)
+ elif address is None:
+ logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+ port)
+ else:
+ logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+ self.curproc, address, port)
+
+ def log_started(self, pid = None):
+ """
+ A convenience function to output a 'Started xxxx (PID yyyy)'
+ message. As with log_starting(), this ensures a consistent
+ format.
+ """
+ if pid is None:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+ else:
+ logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+ def process_running(self, msg, who):
+ """
+ Some processes return a message to the Init after they have
+ started to indicate that they are running. The form of the
+ message is a dictionary with contents {"running": "<process>"}.
+ This method checks the passed message and returns True if the
+ "who" process is contained in the message (so is presumably
+ running). It returns False for all other conditions and will
+ log an error if appropriate.
+ """
+ if msg is not None:
+ try:
+ if msg["running"] == who:
+ return True
+ else:
+ logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
+ except:
+ logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
+
+ return False
+
+ # The next few methods start the individual processes of BIND-10. They
+ # are called via start_all_processes(). If any fail, an exception is
+ # raised which is caught by the caller of start_all_processes(); this kills
+ # processes started up to that point before terminating the program.
+
+ def _make_process_info(self, name, args, env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ """
+ Wrapper around ProcessInfo(), useful to override
+ ProcessInfo() creation during testing.
+ """
+ return ProcessInfo(name, args, env, dev_null_stdout, dev_null_stderr)
+
+ def start_msgq(self):
+ """
+ Start the message queue and connect to the command channel.
+ """
+ self.log_starting("b10-msgq")
+ msgq_proc = self._make_process_info("b10-msgq", ["b10-msgq"],
+ self.c_channel_env,
+ True, not self.verbose)
+ msgq_proc.spawn()
+ self.log_started(msgq_proc.pid)
+
+ # Now connect to the c-channel
+ cc_connect_start = time.time()
+ while self.cc_session is None:
+ # if we are run under unittests, break
+ if self._run_under_unittests:
+ break
+
+ # if we have been trying for "a while" give up
+ if (time.time() - cc_connect_start) > self.msgq_timeout:
+ if msgq_proc.process:
+ msgq_proc.process.kill()
+ logger.error(BIND10_CONNECTING_TO_CC_FAIL)
+ raise CChannelConnectError("Unable to connect to c-channel after %d seconds" % self.msgq_timeout)
+
+ # try to connect, and if we can't wait a short while
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ except isc.cc.session.SessionError:
+ time.sleep(0.1)
+
+ # Subscribe to the message queue. The only messages we expect to receive
+ # on this channel are ones relating to process startup.
+ if self.cc_session is not None:
+ self.cc_session.group_subscribe("Init")
+
+ return msgq_proc
+
+ def start_cfgmgr(self):
+ """
+ Starts the configuration manager process
+ """
+ self.log_starting("b10-cfgmgr")
+ args = ["b10-cfgmgr"]
+ if self.data_path is not None:
+ args.append("--data-path=" + self.data_path)
+ if self.config_filename is not None:
+ args.append("--config-filename=" + self.config_filename)
+ if self.clear_config:
+ args.append("--clear-config")
+ bind_cfgd = self._make_process_info("b10-cfgmgr", args,
+ self.c_channel_env)
+ bind_cfgd.spawn()
+ self.log_started(bind_cfgd.pid)
+
+ # Wait for the configuration manager to start up as
+ # subsequent initialization cannot proceed without it. The
+ # time to wait can be set on the command line.
+ time_remaining = self.wait_time
+ msg, env = self.cc_session.group_recvmsg()
+ while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
+ logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
+ time.sleep(1)
+ time_remaining = time_remaining - 1
+ msg, env = self.cc_session.group_recvmsg()
+
+ if not self.process_running(msg, "ConfigManager"):
+ raise ProcessStartError("Configuration manager process has not started")
+
+ return bind_cfgd
+
+ def start_ccsession(self, c_channel_env):
+ """
+ Start the CC Session
+
+ The argument c_channel_env is unused but is supplied to keep the
+ argument list the same for all start_xxx methods.
+
+ With regard to logging, note that as the CC session is not a
+ process, the log_starting/log_started methods are not used.
+ """
+ logger.info(BIND10_STARTING_CC)
+ self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+ self.config_handler,
+ self.command_handler,
+ socket_file = self.msgq_socket_file)
+ self.ccs.start()
+ logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
+
+ # A couple of utility methods for starting processes...
+
+ def start_process(self, name, args, c_channel_env, port=None, address=None):
+ """
+ Given a set of command arguments, start the process and output
+ appropriate log messages. If the start is successful, the process
+ is added to the list of started processes.
+
+ The port and address arguments are for log messages only.
+ """
+ self.log_starting(name, port, address)
+ newproc = self._make_process_info(name, args, c_channel_env)
+ newproc.spawn()
+ self.log_started(newproc.pid)
+ return newproc
+
+ def register_process(self, pid, component):
+ """
+ Put another process into b10-init to watch over it. When the process
+ dies, the component.failed() is called with the exit code.
+
+ It is expected that the component is an isc.bind10.component.BaseComponent
+ subclass (or anything having the same interface).
+ """
+ self.components[pid] = component
+
+ def start_simple(self, name):
+ """
+ Most of the BIND-10 processes are started with the command:
+
+ <process-name> [-v]
+
+ ... where -v is appended if verbose is enabled. This method
+ generates the arguments from the name and starts the process.
+ """
+ # Set up the command arguments.
+ args = [name]
+ if self.verbose:
+ args += ['-v']
+
+ # ... and start the process
+ return self.start_process(name, args, self.c_channel_env)
+
+ # The next few methods start up the rest of the BIND-10 processes.
+ # Although many of these methods are little more than a call to
+ # start_simple, they are retained (a) for testing reasons and (b) as a place
+ # where modifications can be made if the process start-up sequence changes
+ # for a given process.
+
+ def start_auth(self):
+ """
+ Start the Authoritative server
+ """
+ authargs = ['b10-auth']
+ if self.verbose:
+ authargs += ['-v']
+
+ # ... and start
+ return self.start_process("b10-auth", authargs, self.c_channel_env)
+
+ def start_resolver(self):
+ """
+ Start the Resolver. At present, all these arguments and switches
+ are pure speculation. As with the auth daemon, they should be
+ read from the configuration database.
+ """
+ self.curproc = "b10-resolver"
+ # XXX: this must be read from the configuration manager in the future
+ resargs = ['b10-resolver']
+ if self.verbose:
+ resargs += ['-v']
+
+ # ... and start
+ return self.start_process("b10-resolver", resargs, self.c_channel_env)
+
+ def start_cmdctl(self):
+ """
+ Starts the command control process
+ """
+ args = ["b10-cmdctl"]
+ if self.cmdctl_port is not None:
+ args.append("--port=" + str(self.cmdctl_port))
+ if self.verbose:
+ args.append("-v")
+ return self.start_process("b10-cmdctl", args, self.c_channel_env,
+ self.cmdctl_port)
+
+ def start_all_components(self):
+ """
+ Starts up all the components. Any exception generated during the
+ starting of the components is handled by the caller.
+ """
+ # Start the real core (sockcreator, msgq, cfgmgr)
+ self._component_configurator.startup(self.__core_components)
+
+ # Connect to the msgq. This is not a process, so it's not handled
+ # inside the configurator.
+ self.start_ccsession(self.c_channel_env)
+
+ # Extract the parameters associated with Init. This can only be
+ # done after the CC Session is started. Note that the logging
+ # configuration may override the "-v" switch set on the command line.
+ self._read_bind10_config()
+
+ # TODO: Return the dropping of privileges
+
+ def startup(self):
+ """
+ Start the Init instance.
+
+ Returns None if successful, otherwise a string describing the
+ problem.
+ """
+ # Try to connect to the c-channel daemon, to see if it is already
+ # running
+ c_channel_env = {}
+ if self.msgq_socket_file is not None:
+ c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
+ logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
+ try:
+ self.cc_session = isc.cc.Session(self.msgq_socket_file)
+ logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
+ return "b10-msgq already running, or socket file not cleaned , cannot start"
+ except isc.cc.session.SessionError:
+ # this is the case we want, where the msgq is not running
+ pass
+
+ # Start all components. If any one fails to start, kill all started
+ # components and exit with an error indication.
+ try:
+ self.c_channel_env = c_channel_env
+ self.start_all_components()
+ except ChangeUserError as e:
+ self.kill_started_components()
+ return str(e) + '; ' + NOTE_ON_LOCK_FILE.replace('\n', ' ')
+ except Exception as e:
+ self.kill_started_components()
+ return "Unable to start " + self.curproc + ": " + str(e)
+
+ # Started successfully
+ self.runnable = True
+ self.__started = True
+ return None
+
+ def stop_process(self, process, recipient, pid):
+ """
+ Stop the given process, friendly-like. The process is the name it has
+ (in logs, etc), the recipient is the address on msgq. The pid is the
+ pid of the process (if there are multiple processes of the same name,
+ the recipient can use it to decide whether the shutdown is meant for it).
+ """
+ logger.info(BIND10_STOP_PROCESS, process)
+ self.cc_session.group_sendmsg(isc.config.ccsession.
+ create_command('shutdown', {'pid': pid}),
+ recipient, recipient)
+
+ def component_shutdown(self, exitcode=0):
+ """
+ Stop the Init instance from a components' request. The exitcode
+ indicates the desired exit code.
+
+ If we did not start yet, it raises an exception, which is meant
+ to propagate through the component and configurator to the startup
+ routine and abort the startup immediately. If it is started up already,
+ we just mark it so we terminate soon.
+
+ It does set the exit code in both cases.
+ """
+ self.exitcode = exitcode
+ if not self.__started:
+ raise Exception("Component failed during startup");
+ else:
+ self.runnable = False
+
+ def shutdown(self):
+ """Stop the Init instance."""
+ logger.info(BIND10_SHUTDOWN)
+ # If ccsession is still there, inform rest of the system this module
+ # is stopping. Since everything will be stopped shortly, this is not
+ # really necessary, but this is done to reflect that b10-init is also
+ # 'just' a module.
+ self.ccs.send_stopping()
+
+ # try using the BIND 10 request to stop
+ try:
+ self._component_configurator.shutdown()
+ except:
+ pass
+ # XXX: some delay probably useful... how much is uncertain
+ # I have changed the delay from 0.5 to 1, but sometimes it's
+ # still not enough.
+ time.sleep(1)
+ self.reap_children()
+
+ # Send TERM and KILL signals to modules if we're not prevented
+ # from doing so
+ if not self.nokill:
+ # next try sending a SIGTERM
+ self.__kill_children(False)
+ # finally, send SIGKILL (unmaskable termination) until everybody
+ # dies
+ while self.components:
+ # XXX: some delay probably useful... how much is uncertain
+ time.sleep(0.1)
+ self.reap_children()
+ self.__kill_children(True)
+ logger.info(BIND10_SHUTDOWN_COMPLETE)
+
+ def __kill_children(self, forceful):
+ '''Terminate remaining subprocesses by sending a signal.
+
+ The forceful parameter will be passed to Component.kill().
+ This is a dedicated subroutine of shutdown(), just to unify two
+ similar cases.
+
+ '''
+ logmsg = BIND10_SEND_SIGKILL if forceful else BIND10_SEND_SIGTERM
+ # We need to make a copy of values as the components may be modified
+ # in the loop.
+ for component in list(self.components.values()):
+ logger.info(logmsg, component.name(), component.pid())
+ try:
+ component.kill(forceful)
+ except OSError as ex:
+ # If kill() failed due to EPERM, it doesn't make sense to
+ # keep trying, so we just log the fact and forget that
+ # component. Ignore other OSErrors (usually ESRCH because
+ # the child finally exited)
+ signame = "SIGKILL" if forceful else "SIGTERM"
+ logger.info(BIND10_SEND_SIGNAL_FAIL, signame,
+ component.name(), component.pid(), ex)
+ if ex.errno == errno.EPERM:
+ del self.components[component.pid()]
+
+ def _get_process_exit_status(self):
+ return os.waitpid(-1, os.WNOHANG)
+
+ def reap_children(self):
+ """Check to see if any of our child processes have exited,
+ and note this for later handling.
+ """
+ while True:
+ try:
+ (pid, exit_status) = self._get_process_exit_status()
+ except OSError as o:
+ if o.errno == errno.ECHILD:
+ break
+ # XXX: should be impossible to get any other error here
+ raise
+ if pid == 0:
+ break
+ if pid in self.components:
+ # One of the components we know about. Get information on it.
+ component = self.components.pop(pid)
+ logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
+ exit_status)
+ if component.is_running() and self.runnable:
+ # Tell it it failed. But only if it matters (we are
+ # not shutting down and the component considers itself
+ # to be running).
+ component_restarted = component.failed(exit_status)
+ # if the process wants to be restarted, but not just yet,
+ # it returns False
+ if not component_restarted:
+ self.components_to_restart.append(component)
+ else:
+ logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+ def restart_processes(self):
+ """
+ Restart any dead processes:
+
+ * Returns the time when the next process is ready to be restarted.
+ * If the server is shutting down, returns 0.
+ * If there are no processes, returns None.
+
+ The values returned can be safely passed into select() as the
+ timeout value.
+
+ """
+ if not self.runnable:
+ return 0
+ still_dead = []
+ # keep track of the first time we need to check this queue again,
+ # if at all
+ next_restart_time = None
+ now = time.time()
+ for component in self.components_to_restart:
+ # If the component was removed from the configurator since it was
+ # scheduled to restart, just ignore it. The object will just be
+ # dropped here.
+ if not self._component_configurator.has_component(component):
+ logger.info(BIND10_RESTART_COMPONENT_SKIPPED, component.name())
+ elif not component.restart(now):
+ still_dead.append(component)
+ if next_restart_time is None or\
+ next_restart_time > component.get_restart_time():
+ next_restart_time = component.get_restart_time()
+ self.components_to_restart = still_dead
+
+ return next_restart_time
+
+ def _get_socket(self, args):
+ """
+ Implementation of the get_socket CC command. It asks the cache
+ to provide the token and sends the information back.
+ """
+ try:
+ try:
+ addr = isc.net.parse.addr_parse(args['address'])
+ port = isc.net.parse.port_parse(args['port'])
+ protocol = args['protocol']
+ if protocol not in ['UDP', 'TCP']:
+ raise ValueError("Protocol must be either UDP or TCP")
+ share_mode = args['share_mode']
+ if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+ raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+ " or NO")
+ share_name = args['share_name']
+ except KeyError as ke:
+ return \
+ isc.config.ccsession.create_answer(1,
+ "Missing parameter " +
+ str(ke))
+
+ # FIXME: This call contains blocking IPC. It is expected to be
+ # short, but if it turns out to be problem, we'll need to do
+ # something about it.
+ token = self._socket_cache.get_token(protocol, addr, port,
+ share_mode, share_name)
+ return isc.config.ccsession.create_answer(0, {
+ 'token': token,
+ 'path': self._socket_path
+ })
+ except isc.bind10.socket_cache.SocketError as e:
+ return isc.config.ccsession.create_answer(CREATOR_SOCKET_ERROR,
+ str(e))
+ except isc.bind10.socket_cache.ShareError as e:
+ return isc.config.ccsession.create_answer(CREATOR_SHARE_ERROR,
+ str(e))
+ except Exception as e:
+ return isc.config.ccsession.create_answer(1, str(e))
+
+ def socket_request_handler(self, token, unix_socket):
+ """
+ This function handles a token that comes over a unix_domain socket.
+ The function looks into the _socket_cache and sends the socket
+ identified by the token back over the unix_socket.
+ """
+ try:
+ token = str(token, 'ASCII') # Convert from bytes to str
+ fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+ # FIXME: These two calls are blocking in their nature. An OS-level
+ # buffer is likely to be large enough to hold all these data, but
+ # if it wasn't and the remote application got stuck, we would have
+ # a problem. If such problems appear, we should do something
+ # about it.
+ unix_socket.sendall(CREATOR_SOCKET_OK)
+ libutil_io_python.send_fd(unix_socket.fileno(), fd)
+ except Exception as e:
+ logger.info(BIND10_NO_SOCKET, token, e)
+ unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+ def socket_consumer_dead(self, unix_socket):
+ """
+ This function handles when a unix_socket closes. This means all
+ sockets sent to it are to be considered closed. This function signals
+ so to the _socket_cache.
+ """
+ logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+ try:
+ self._socket_cache.drop_application(unix_socket.fileno())
+ except ValueError:
+ # This means the application holds no sockets. It's harmless, as it
+ # can happen in real life - for example, it requests a socket, but
+ # get_socket doesn't find it, so the application dies. It should be
+ # rare, though.
+ pass
+
+ def set_creator(self, creator):
+ """
+ Registers a socket creator with b10-init. The socket creator is not
+ used directly, but through a cache. The cache is created in this
+ method.
+
+ If called more than once, it raises a ValueError.
+ """
+ if self._socket_cache is not None:
+ raise ValueError("A creator was inserted previously")
+ self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+ def init_socket_srv(self):
+ """
+ Creates and listens on a unix-domain socket to be able to send out
+ the sockets.
+
+ This method should be called after switching user, or the switched
+ applications won't be able to access the socket.
+ """
+ self._srv_socket = socket.socket(socket.AF_UNIX)
+ # We create a temporary directory somewhere safe and unique, to avoid
+ # the need to find the place ourselves or bother users. Also, this
+ # secures the socket on some platforms, as it creates a private
+ # directory.
+ self._tmpdir = tempfile.mkdtemp(prefix='sockcreator-')
+ # Get the name
+ self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+ # And bind the socket to the name
+ self._srv_socket.bind(self._socket_path)
+ self._srv_socket.listen(5)
+
+ def remove_socket_srv(self):
+ """
+ Closes and removes the listening socket and the directory where it
+ lives, as we created both.
+
+ It does nothing if the _srv_socket is not set (eg. it was not yet
+ initialized).
+ """
+ if self._srv_socket is not None:
+ self._srv_socket.close()
+ if os.path.exists(self._socket_path):
+ os.remove(self._socket_path)
+ if os.path.isdir(self._tmpdir):
+ os.rmdir(self._tmpdir)
+
+ def _srv_accept(self):
+ """
+ Accept a socket from the unix domain socket server and put it to the
+ others we care about.
+ """
+ (sock, _addr) = self._srv_socket.accept()
+ self._unix_sockets[sock.fileno()] = (sock, b'')
+
+ def _socket_data(self, socket_fileno):
+ """
+ This is called when a socket identified by the socket_fileno needs
+ attention. We try to read data from there. If it is closed, we remove
+ it.
+ """
+ (sock, previous) = self._unix_sockets[socket_fileno]
+ while True:
+ try:
+ data = sock.recv(1, socket.MSG_DONTWAIT)
+ except socket.error as se:
+ # These two might be different on some systems
+ if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+ # No more data now. Oh, well, just store what we have.
+ self._unix_sockets[socket_fileno] = (sock, previous)
+ return
+ else:
+ data = b'' # Pretend it got closed
+ if len(data) == 0: # The socket got to its end
+ del self._unix_sockets[socket_fileno]
+ self.socket_consumer_dead(sock)
+ sock.close()
+ return
+ else:
+ if data == b"\n":
+ # Handle this token and clear it
+ self.socket_request_handler(previous, sock)
+ previous = b''
+ else:
+ previous += data
+
+ def run(self, wakeup_fd):
+ """
+ The main loop, waiting for sockets, commands and dead processes.
+ Runs as long as the runnable is true.
+
+ The wakeup_fd descriptor is the read end of the pipe to which the
+ SIGCHLD signal handler writes.
+ """
+ ccs_fd = self.ccs.get_socket().fileno()
+ while self.runnable:
+ # clean up any processes that exited
+ self.reap_children()
+ next_restart = self.restart_processes()
+ if next_restart is None:
+ wait_time = None
+ else:
+ wait_time = max(next_restart - time.time(), 0)
+
+ # select() can raise EINTR when a signal arrives,
+ # even if they are resumable, so we have to catch
+ # the exception
+ try:
+ (rlist, wlist, xlist) = \
+ select.select([wakeup_fd, ccs_fd,
+ self._srv_socket.fileno()] +
+ list(self._unix_sockets.keys()), [], [],
+ wait_time)
+ except select.error as err:
+ if err.args[0] == errno.EINTR:
+ (rlist, wlist, xlist) = ([], [], [])
+ else:
+ logger.fatal(BIND10_SELECT_ERROR, err)
+ break
+
+ for fd in rlist + xlist:
+ if fd == ccs_fd:
+ try:
+ self.ccs.check_command()
+ except isc.cc.session.ProtocolError:
+ logger.fatal(BIND10_MSGQ_DISAPPEARED)
+ self.runnable = False
+ break
+ elif fd == wakeup_fd:
+ os.read(wakeup_fd, 32)
+ elif fd == self._srv_socket.fileno():
+ self._srv_accept()
+ elif fd in self._unix_sockets:
+ self._socket_data(fd)
+
+# global variables, needed for signal handlers
+options = None
+b10_init = None
+
+def reaper(signal_number, stack_frame):
+ """A child process has died (SIGCHLD received)."""
+ # don't do anything...
+ # the Python signal handler has been set up to write
+ # down a pipe, waking up our select() bit
+ pass
+
+def get_signame(signal_number):
+ """Return the symbolic name for a signal."""
+ for sig in dir(signal):
+ if sig.startswith("SIG") and sig[3].isalnum():
+ if getattr(signal, sig) == signal_number:
+ return sig
+ return "Unknown signal %d" % signal_number
+
+# XXX: perhaps register atexit() function and invoke that instead
+def fatal_signal(signal_number, stack_frame):
+ """We need to exit (SIGINT or SIGTERM received)."""
+ global options
+ global b10_init
+ logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ b10_init.runnable = False
+
+def process_rename(option, opt_str, value, parser):
+ """Function that renames the process if it is requested by a option."""
+ isc.util.process.rename(value)
+
+def parse_args(args=sys.argv[1:], Parser=OptionParser):
+ """
+ Function for parsing command line arguments. Returns the
+ options object from OptionParser.
+ """
+ parser = Parser(version=VERSION)
+ parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
+ type="string", default=None,
+ help="UNIX domain socket file the b10-msgq daemon will use")
+ parser.add_option("-i", "--no-kill", action="store_true", dest="nokill",
+ default=False, help="do not send SIGTERM and SIGKILL signals to modules during shutdown")
+ parser.add_option("-u", "--user", dest="user", type="string", default=None,
+ help="Change user after startup (must run as root)")
+ parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
+ help="display more about what is going on")
+ parser.add_option("--pretty-name", type="string", action="callback",
+ callback=process_rename,
+ help="Set the process name (displayed in ps, top, ...)")
+ parser.add_option("-c", "--config-file", action="store",
+ dest="config_file", default=None,
+ help="Configuration database filename")
+ parser.add_option("--clear-config", action="store_true",
+ dest="clear_config", default=False,
+ help="Create backup of the configuration file and " +
+ "start with a clean configuration")
+ parser.add_option("-p", "--data-path", dest="data_path",
+ help="Directory to search for configuration files",
+ default=None)
+ parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
+ default=None, help="Port of command control")
+ parser.add_option("--pid-file", dest="pid_file", type="string",
+ default=None,
+ help="file to dump the PID of the BIND 10 process")
+ parser.add_option("-w", "--wait", dest="wait_time", type="int",
+ default=10, help="Time (in seconds) to wait for config manager to start up")
+
+ (options, args) = parser.parse_args(args)
+
+ if options.cmdctl_port is not None:
+ try:
+ isc.net.parse.port_parse(options.cmdctl_port)
+ except ValueError as e:
+ parser.error(e)
+
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ return options
+
+def dump_pid(pid_file):
+ """
+ Dump the PID of the current process to the specified file. If the given
+ file is None this function does nothing. If the file already exists,
+ the existing content will be removed. If a system error happens in
+ creating or writing to the file, the corresponding exception will be
+ propagated to the caller.
+ """
+ if pid_file is None:
+ return
+ f = open(pid_file, "w")
+ f.write('%d\n' % os.getpid())
+ f.close()
+
+def unlink_pid_file(pid_file):
+ """
+ Remove the given file, which is basically expected to be the PID file
+ created by dump_pid(). The specified file may or may not exist; if it
+ doesn't, this function does nothing. Other system level errors in removing
+ the file will be propagated as the corresponding exception.
+ """
+ if pid_file is None:
+ return
+ try:
+ os.unlink(pid_file)
+ except OSError as error:
+ if error.errno != errno.ENOENT:
+ raise
+
+def remove_lock_files():
+ """
+ Remove various lock files which were created by code such as in the
+ logger. This function should be called after BIND 10 shutdown.
+ """
+
+ lockfiles = ["logger_lockfile"]
+
+ lpath = bind10_config.DATA_PATH
+ if "B10_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_FROM_BUILD"]
+ if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
+ lpath = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
+ if "B10_LOCKFILE_DIR_FROM_BUILD" in os.environ:
+ lpath = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"]
+
+ for f in lockfiles:
+ fname = lpath + '/' + f
+ if os.path.isfile(fname):
+ try:
+ os.unlink(fname)
+ except OSError as e:
+ # We catch and ignore permission related error on unlink.
+ # This can happen if bind10 started with -u, created a lock
+ # file as a privileged user, but the directory is not writable
+ # for the changed user. This setup will cause immediate
+ # start failure, and we leave verbose error message including
+ # the leftover lock file, so it should be acceptable to ignore
+ # it (note that it doesn't make sense to log this event at
+ # this point)
+ if e.errno != errno.EPERM and e.errno != errno.EACCES:
+ raise
+
+ return
+
+def main():
+ global options
+ global b10_init
+ # Enforce line buffering on stdout, even when not a TTY
+ sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
+
+ options = parse_args()
+
+ # Announce startup. This should be the first log message.
+ try:
+ logger.info(BIND10_STARTING, VERSION)
+ except RuntimeError as e:
+ sys.stderr.write('ERROR: failed to write the initial log: %s\n' %
+ str(e))
+ sys.stderr.write(NOTE_ON_LOCK_FILE)
+ sys.exit(1)
+
+ # Check user ID.
+ setuid = None
+ setgid = None
+ username = None
+ if options.user:
+ # Try getting information about the user, assuming UID passed.
+ try:
+ pw_ent = pwd.getpwuid(int(options.user))
+ setuid = pw_ent.pw_uid
+ setgid = pw_ent.pw_gid
+ username = pw_ent.pw_name
+ except ValueError:
+ pass
+ except KeyError:
+ pass
+
+ # Next try getting information about the user, assuming user name
+ # passed.
+ # If the information is both a valid user name and user number, we
+ # prefer the name because we try it second. A minor point, hopefully.
+ try:
+ pw_ent = pwd.getpwnam(options.user)
+ setuid = pw_ent.pw_uid
+ setgid = pw_ent.pw_gid
+ username = pw_ent.pw_name
+ except KeyError:
+ pass
+
+ if setuid is None:
+ logger.fatal(BIND10_INVALID_USER, options.user)
+ sys.exit(1)
+
+ # Create wakeup pipe for signal handlers
+ wakeup_pipe = os.pipe()
+ signal.set_wakeup_fd(wakeup_pipe[1])
+
+ # Set signal handlers for catching child termination, as well
+ # as our own demise.
+ signal.signal(signal.SIGCHLD, reaper)
+ signal.siginterrupt(signal.SIGCHLD, False)
+ signal.signal(signal.SIGINT, fatal_signal)
+ signal.signal(signal.SIGTERM, fatal_signal)
+
+ # Block SIGPIPE, as we don't want it to end this process
+ signal.signal(signal.SIGPIPE, signal.SIG_IGN)
+
+ try:
+ b10_init = Init(options.msgq_socket_file, options.data_path,
+ options.config_file, options.clear_config,
+ options.verbose, options.nokill,
+ setuid, setgid, username, options.cmdctl_port,
+ options.wait_time)
+ startup_result = b10_init.startup()
+ if startup_result:
+ logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+ sys.exit(1)
+ b10_init.init_socket_srv()
+ logger.info(BIND10_STARTUP_COMPLETE)
+ dump_pid(options.pid_file)
+
+ # Let it run
+ b10_init.run(wakeup_pipe[0])
+
+ # shutdown
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ b10_init.shutdown()
+ finally:
+ # Clean up the filesystem
+ unlink_pid_file(options.pid_file)
+ remove_lock_files()
+ if b10_init is not None:
+ b10_init.remove_socket_srv()
+ sys.exit(b10_init.exitcode)
+
+if __name__ == "__main__":
+ main()
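
For illustration, a minimal client-side sketch of how a module could use the
socket hand-off added above: the command parameters and the answer keys
('token', 'path') follow the _get_socket() handler, and the newline-terminated
token plus the "1\n"/"0\n" status bytes follow _socket_data() and
socket_request_handler(). recv_fd() is assumed to be the receiving counterpart
of the libutil_io_python.send_fd() call used by b10-init; treat the whole
snippet as a sketch, with made-up address, port and share_name values.

    # Sketch only; parameter names follow _get_socket(), values are examples.
    import socket
    import libutil_io_python
    import isc.config.ccsession

    # The request a module would send to the Init module over the CC channel.
    GET_SOCKET_CMD = isc.config.ccsession.create_command("get_socket", {
        "address": "127.0.0.1",
        "port": 5300,
        "protocol": "UDP",          # 'UDP' or 'TCP'
        "share_mode": "ANY",        # 'ANY', 'SAMEAPP' or 'NO'
        "share_name": "example-app"
    })

    def fetch_fd(path, token):
        """Retrieve the socket identified by token from b10-init.

        Both path and token come from the answer to the get_socket command.
        """
        conn = socket.socket(socket.AF_UNIX)
        conn.connect(path)
        # The token must be terminated by a newline; b10-init buffers the
        # bytes until it sees b"\n" (see _socket_data()).
        conn.sendall(token.encode('ascii') + b"\n")
        status = conn.recv(2)   # CREATOR_SOCKET_OK (b"1\n") or b"0\n"
        if status != b"1\n":
            raise RuntimeError("b10-init could not provide the socket")
        # Assumed counterpart of libutil_io_python.send_fd().
        return libutil_io_python.recv_fd(conn.fileno())
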
diff --git a/src/bin/bind10/init.spec b/src/bin/bind10/init.spec
new file mode 100644
index 0000000..62c6f09
--- /dev/null
+++ b/src/bin/bind10/init.spec
@@ -0,0 +1,92 @@
+{
+ "module_spec": {
+ "module_name": "Init",
+ "module_description": "Init process",
+ "config_data": [
+ {
+ "item_name": "components",
+ "item_type": "named_set",
+ "item_optional": false,
+ "item_default": {
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ },
+ "named_set_item_spec": {
+ "item_name": "component",
+ "item_type": "map",
+ "item_optional": false,
+ "item_default": { },
+ "map_item_spec": [
+ {
+ "item_name": "special",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "process",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "kind",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": "dispensable"
+ },
+ {
+ "item_name": "address",
+ "item_optional": true,
+ "item_type": "string"
+ },
+ {
+ "item_name": "params",
+ "item_optional": true,
+ "item_type": "list",
+ "list_item_spec": {
+ "item_name": "param",
+ "item_optional": false,
+ "item_type": "string",
+ "item_default": ""
+ }
+ },
+ {
+ "item_name": "priority",
+ "item_optional": true,
+ "item_type": "integer"
+ }
+ ]
+ }
+ }
+ ],
+ "commands": [
+ {
+ "command_name": "shutdown",
+ "command_description": "Shut down BIND 10",
+ "command_args": []
+ },
+ {
+ "command_name": "ping",
+ "command_description": "Ping the b10-init process",
+ "command_args": []
+ },
+ {
+ "command_name": "show_processes",
+ "command_description": "List the running BIND 10 processes",
+ "command_args": []
+ }
+ ],
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ }
+}
+
+
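As an illustration of the named_set defined above, here is a "components"
value the Init module would accept, written as a Python literal. The b10-stats
and b10-cmdctl entries mirror the item_default; the b10-auth entry and the
field comments are only an example of the remaining keys, inferred from the
spec and from init.py.in.

    components = {
        "b10-stats": {"address": "Stats", "kind": "dispensable"},
        "b10-cmdctl": {"special": "cmdctl", "kind": "needed"},
        # Hypothetical extra component showing the other keys.
        "b10-auth": {
            "process": "b10-auth",   # executable to run (optional)
            "kind": "needed",        # "core", "needed" or "dispensable"
            "priority": 10,          # higher priorities are started first
            "params": ["-v"]         # extra command-line parameters
        }
    }
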
diff --git a/src/bin/bind10/init_messages.mes b/src/bin/bind10/init_messages.mes
new file mode 100644
index 0000000..9cdb7ef
--- /dev/null
+++ b/src/bin/bind10/init_messages.mes
@@ -0,0 +1,327 @@
+# Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the xfrin messages python module.
+
+% BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running
+The b10-init process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+
+% BIND10_COMPONENT_FAILED component %1 (pid %2) failed: %3
+The process terminated, but b10-init didn't expect it to, which means
+it must have failed.
+
+% BIND10_COMPONENT_RESTART component %1 is about to restart
+The named component failed previously and we will try to restart it to provide
+as flawless a service as possible, but what happened should be investigated,
+as it could happen again.
+
+% BIND10_COMPONENT_START component %1 is starting
+The named component is about to be started by the b10-init process.
+
+% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+
+% BIND10_COMPONENT_STOP component %1 is being stopped
+A component is about to be asked to stop willingly by the b10-init.
+
+% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+
+% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
+A debug message. This indicates that the configurator is building a plan
+for changing the configuration from the old one to the new one. This does no
+real work yet; it only plans what needs to be done.
+
+% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+
+% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the b10-init.
+
+% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+
+% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
+The part that cares about starting and stopping the right component from
+the b10-init process is starting up. This happens only once at the startup
+of the b10-init process. It will start the basic set of processes now (the
+ones b10-init needs to read the configuration), the rest will be started
+after the configuration is known.
+
+% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
+The part that cares about starting and stopping processes in the b10-init is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate on their own; if they fail to comply, other parts of
+the b10-init process will try to force them).
+
+% BIND10_CONFIGURATOR_TASK performing task %1 on %2
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+
+% BIND10_CONNECTING_TO_CC_FAIL failed to connect to configuration/command channel; try -v to see output from msgq
+The b10-init process tried to connect to the communication channel for
+commands and configuration updates during initialization, but it
+failed. This is a fatal startup error, and process will soon
+terminate after some cleanup. There can be several reasons for the
+failure, but the most likely cause is that the msgq daemon failed to
+start, and the most likely cause of the msgq failure is that it
+doesn't have permission to create a socket file for the
+communication. To confirm that, you can see debug messages from msgq
+by starting BIND 10 with the -v command line option. If it indicates
+permission problem for msgq, make sure the directory where the socket
+file is to be created is writable for the msgq process. Note that if
+you specify the -u option to change process users, the directory must
+be writable for that user.
+
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the b10-init module specified
+statistics data which is invalid for the b10-init specification file.
+
+% BIND10_INVALID_USER invalid user: %1
+The b10-init process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The b10-init module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and the bind10 process can release them as well, unless the
+same sockets are used by yet another application.
+
+% BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+
+% BIND10_MSGQ_DISAPPEARED msgq channel disappeared
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned; the most common reason is that the request
+is invalid and may not have come from a bind10 process at all.
+
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
+
+% BIND10_READING_INIT_CONFIGURATION reading b10-init configuration
+The b10-init process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+
+% BIND10_RECEIVED_COMMAND received command: %1
+The b10-init module received a command and shall now process it. The command
+is printed.
+
+% BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1
+The b10-init module received a configuration update and is going to apply
+it now. The new configuration is printed.
+
+% BIND10_RECEIVED_SIGNAL received signal %1
+The b10-init module received the given signal.
+
+% BIND10_RESTART_COMPONENT_SKIPPED Skipped restarting a component %1
+The b10-init module tried to restart a component after it failed (crashed)
+unexpectedly, but the b10-init then found that the component had been removed
+from its local configuration of components to run. This is an unusual
+situation but can happen if the administrator removes the component from
+the configuration after the component's crash and before the restart time.
+The b10-init module simply skipped restarting that module, and the whole system
+went back to the expected state (except that the crash itself is likely
+to be a bug).
+
+% BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)
+The given process has been restarted successfully, and is now running
+with the given process id.
+
+% BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...
+The given process has ended unexpectedly, and is now restarted.
+
+% BIND10_SELECT_ERROR error in select() call: %1
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+
+% BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)
+The b10-init module is sending a SIGKILL signal to the given process.
+
+% BIND10_SEND_SIGNAL_FAIL sending %1 to %2 (PID %3) failed: %4
+The b10-init module sent a signal (either SIGTERM or SIGKILL) to a process,
+but it failed due to some system level error. There are two major cases:
+the target process has already terminated but the b10-init module had sent
+the signal before it noticed the termination. In this case an error
+message should indicate something like "no such process". This can be
+safely ignored. The other case is that the b10-init module doesn't have
+the privilege to send a signal to the process. It can typically
+happen when the b10-init module started as a privileged process, spawned a
+subprocess, and then dropped the privilege. It includes the case for
+the socket creator when the b10-init process runs with the -u command line
+option. In this case, the b10-init module simply gives up trying to terminate
+the process explicitly, because it's unlikely to succeed by repeatedly
+sending the signal. Although the socket creator is implemented so
+that it will terminate automatically when the b10-init process exits
+(and that should be the case for any other future process running with
+a higher privilege), it's recommended to check if there's any
+remaining BIND 10 process if this message is logged. For all other
+cases, the b10-init module will keep sending the signal until it confirms
+all child processes terminate. Although unlikely, this could prevent
+the b10-init module from exiting, as it just keeps sending the signals. So,
+again, it's advisable to check if it really terminates when this
+message is logged.
+
+% BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
+The b10-init module is sending a SIGTERM signal to the given process.
+
+% BIND10_SETGID setting GID to %1
+The b10-init switches the process group ID to the given value. This happens
+when BIND 10 starts with the -u option, and the group ID will be set to
+that of the specified user.
+
+% BIND10_SETUID setting UID to %1
+The b10-init switches the user it runs as to the given UID.
+
+% BIND10_SHUTDOWN stopping the server
+The b10-init process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+
+% BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete
+All child processes have been stopped, and the b10-init process will now
+stop itself.
+
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket, but the function
+that failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The b10-init module requested a socket from the socket creator, but the answer
+was not recognised. This looks like a programmer error.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The b10-init module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not usually happen.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The b10-init module sends a termination request to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending data to or receiving data from the socket creator failed with
+the given error. The creator probably crashed, or some serious OS-level
+problem occurred, as the communication happens only on the local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket; it has
+the given file descriptor number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with the given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The b10-init module forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_CC started configuration/command session
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+
+% BIND10_STARTED_PROCESS started %1
+The given process has successfully been started.
+
+% BIND10_STARTED_PROCESS_PID started %1 (PID %2)
+The given process has successfully been started, and has the given PID.
+
+% BIND10_STARTING starting BIND10: %1
+Informational message on startup that shows the full version.
+
+% BIND10_STARTING_CC starting configuration/command session
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+
+% BIND10_STARTING_PROCESS starting process %1
+The b10-init module is starting the given process.
+
+% BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)
+The b10-init module is starting the given process, which will listen on the
+given port number.
+
+% BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)
+The b10-init module is starting the given process, which will listen on the
+given address and port number (written as <address>#<port>).
+
+% BIND10_STARTUP_COMPLETE BIND 10 started
+All modules have been successfully started, and BIND 10 is now running.
+
+% BIND10_STARTUP_ERROR error during startup: %1
+There was a fatal error when BIND 10 was trying to start. The error is
+shown, and BIND 10 will now shut down.
+
+% BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Init process and the processes it starts. This error is output when a
+message received by the Init process is recognised as being of the
+correct format but is unexpected. It may be that processes are starting
+out of sequence.
+
+% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Init process and the processes it starts. This error is output when a
+message received by the Init process is not recognised.
+
+% BIND10_STOP_PROCESS asking %1 to shut down
+The b10-init module is sending a shutdown command to the given module over
+the message channel.
+
+% BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the b10-init process.
+
+% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Init module will wait for it to initialize
+itself before continuing. This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up. The total length of time Init
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
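
As a rough illustration of the escalating shutdown described by the
BIND10_SHUTDOWN and BIND10_SEND_SIGNAL_FAIL messages above, here is a minimal
Python sketch; the function name, the timeout and the set-of-PIDs interface
are illustrative assumptions, not the actual b10-init code:

    import errno
    import os
    import signal
    import time

    def terminate_children(pids, nokill=False, timeout=5.0):
        """Escalating shutdown sketch: SIGTERM first, then SIGKILL for survivors."""
        if nokill:
            return
        for sig in (signal.SIGTERM, signal.SIGKILL):
            for pid in list(pids):
                try:
                    os.kill(pid, sig)
                except OSError as ex:
                    if ex.errno == errno.ESRCH:
                        # Process already gone; nothing more to do for it.
                        pids.discard(pid)
                    elif ex.errno == errno.EPERM:
                        # No privilege to signal it (e.g. a process still
                        # running with higher privilege); give up on this one.
                        pids.discard(pid)
                    else:
                        raise
            # Give processes a moment to exit before escalating.
            deadline = time.time() + timeout
            while pids and time.time() < deadline:
                for pid in list(pids):
                    try:
                        done, _ = os.waitpid(pid, os.WNOHANG)
                    except OSError:
                        done = pid
                    if done == pid:
                        pids.discard(pid)
                time.sleep(0.1)

A caller would pass the set of child PIDs it still tracks, for example
terminate_children({pid1, pid2}); ESRCH and EPERM are treated as "give up on
this process", in line with the behaviour described above.
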
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 17d2c53..8121eba 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -45,5 +45,5 @@ export B10_FROM_BUILD
BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
export BIND10_MSGQ_SOCKET_FILE
-exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
+exec ${BIND10_PATH}/b10-init "$@"
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index a5e3fab..6d59dbd 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
#PYTESTS = args_test.py bind10_test.py
# NOTE: this has a generated test found in the builddir
-PYTESTS = bind10_test.py
+PYTESTS = init_test.py
noinst_SCRIPTS = $(PYTESTS)
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/bin/bind10/tests/args_test.py b/src/bin/bind10/tests/args_test.py
index 93a7cea..2447a62 100644
--- a/src/bin/bind10/tests/args_test.py
+++ b/src/bin/bind10/tests/args_test.py
@@ -1,5 +1,5 @@
"""
-This program tests the boss process to make sure that it runs while
+This program tests the b10-init process to make sure that it runs while
dropping permissions. It must be run as a user that can set permission.
"""
import unittest
@@ -17,69 +17,69 @@ SUID_USER="shane"
BIND10_EXE="../run_bind10.sh"
TIMEOUT=3
-class TestBossArgs(unittest.TestCase):
- def _waitForString(self, bob, s):
+class TestInitArgs(unittest.TestCase):
+ def _waitForString(self, init, s):
found_string = False
start_time = time.time()
while time.time() < start_time + TIMEOUT:
- (r,w,x) = select.select((bob.stdout,), (), (), TIMEOUT)
- if bob.stdout in r:
- s = bob.stdout.readline()
+ (r,w,x) = select.select((init.stdout,), (), (), TIMEOUT)
+ if init.stdout in r:
+ s = init.stdout.readline()
if s == '':
break
- if s.startswith(s):
+ if s.startswith(s):
found_string = True
break
return found_string
def testNoArgs(self):
"""Run bind10 without any arguments"""
- bob = subprocess.Popen(args=(BIND10_EXE,),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ init = subprocess.Popen(args=(BIND10_EXE,),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
time.sleep(0.1)
- bob.terminate()
- bob.wait()
+ init.terminate()
+ init.wait()
self.assertTrue(started_ok)
def testBadOption(self):
"""Run bind10 with a bogus option"""
- bob = subprocess.Popen(args=(BIND10_EXE, "--badoption"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, 'bind10: error: no such option: --badoption')
+ init = subprocess.Popen(args=(BIND10_EXE, "--badoption"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, 'bind10: error: no such option: --badoption')
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 2)
+ init.terminate()
+ self.assertTrue(init.wait() == 2)
self.assertTrue(failed)
def testArgument(self):
"""Run bind10 with an argument (this is not allowed)"""
- bob = subprocess.Popen(args=(BIND10_EXE, "argument"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, 'Usage: bind10 [options]')
+ init = subprocess.Popen(args=(BIND10_EXE, "argument"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, 'Usage: bind10 [options]')
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testBadUser(self):
"""Run bind10 with a bogus user"""
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "bogus_user"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "bind10: invalid user: 'bogus_user'")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "bogus_user"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "bind10: invalid user: 'bogus_user'")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testBadUid(self):
"""Run bind10 with a bogus user ID"""
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "999999999"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "bind10: invalid user: '999999999'")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "999999999"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "bind10: invalid user: '999999999'")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testFailSetUser(self):
@@ -90,12 +90,12 @@ class TestBossArgs(unittest.TestCase):
if os.getuid() == 0:
self.skipTest("test must not be run as root (uid is 0)")
# XXX: we depend on the "nobody" user
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", "nobody"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- failed = self._waitForString(bob, "[bind10] Error on startup: Unable to start b10-msgq; Unable to change to user nobody")
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", "nobody"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ failed = self._waitForString(init, "[bind10] Error on startup: Unable to start b10-msgq; Unable to change to user nobody")
time.sleep(0.1)
- bob.terminate()
- self.assertTrue(bob.wait() == 1)
+ init.terminate()
+ self.assertTrue(init.wait() == 1)
self.assertTrue(failed)
def testSetUser(self):
@@ -108,9 +108,9 @@ class TestBossArgs(unittest.TestCase):
if os.geteuid() != 0:
self.skipTest("test must run as root (euid is not 0)")
- bob = subprocess.Popen(args=(BIND10_EXE, "-u", SUID_USER),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ init = subprocess.Popen(args=(BIND10_EXE, "-u", SUID_USER),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
self.assertTrue(started_ok)
ps = subprocess.Popen(args=("ps", "axo", "user,pid"),
stdout=subprocess.PIPE)
@@ -120,22 +120,22 @@ class TestBossArgs(unittest.TestCase):
s = ps.stdout.readline()
if s == '': break
(user, pid) = s.split()
- if int(pid) == bob.pid:
+ if int(pid) == init.pid:
ps_user = user.decode()
break
self.assertTrue(ps_user is not None)
self.assertTrue(ps_user == SUID_USER)
time.sleep(0.1)
- bob.terminate()
- x = bob.wait()
- self.assertTrue(bob.wait() == 0)
+ init.terminate()
+ x = init.wait()
+ self.assertTrue(init.wait() == 0)
def testPrettyName(self):
"""Try the --pretty-name option."""
- CMD_PRETTY_NAME = b'bob-name-test'
- bob = subprocess.Popen(args=(BIND10_EXE, '--pretty-name',
+ CMD_PRETTY_NAME = b'init-name-test'
+ init = subprocess.Popen(args=(BIND10_EXE, '--pretty-name',
CMD_PRETTY_NAME), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- started_ok = self._waitForString(bob, '[bind10] BIND 10 started')
+ started_ok = self._waitForString(init, '[bind10] BIND 10 started')
self.assertTrue(started_ok)
ps = subprocess.Popen(args=("ps", "axo", "pid,comm"),
stdout=subprocess.PIPE)
@@ -145,13 +145,13 @@ class TestBossArgs(unittest.TestCase):
s = ps.stdout.readline()
if s == '': break
(pid,comm) = s.split(None, 1)
- if int(pid) == bob.pid:
+ if int(pid) == init.pid:
command = comm
break
self.assertEqual(command, CMD_PRETTY_NAME + b'\n')
time.sleep(0.1)
- bob.terminate()
- bob.wait()
+ init.terminate()
+ init.wait()
if __name__ == '__main__':
unittest.main()
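
The argument tests above poll the child's stdout with select() until an
expected startup line appears. A minimal sketch of that pattern follows, with
illustrative names and assuming the expected prefix is passed as bytes
(subprocess pipes yield bytes by default):

    import select
    import time

    TIMEOUT = 3

    def wait_for_string(proc, expected, timeout=TIMEOUT):
        """Poll proc.stdout until a line starting with 'expected' appears."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            (readable, _, _) = select.select((proc.stdout,), (), (), timeout)
            if proc.stdout in readable:
                line = proc.stdout.readline()
                if line == b'':        # EOF: the child closed its stdout
                    break
                if line.startswith(expected):
                    return True
        return False

For example, wait_for_string(init, b'[bind10] BIND 10 started') would return
True once the startup line is seen, or False after the timeout.
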
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
deleted file mode 100644
index ccfa831..0000000
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ /dev/null
@@ -1,2422 +0,0 @@
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
-# we want to be explicit about what we do, like when hijacking a library
-# call used by the bind10_src.
-from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
-import bind10_src
-
-# XXX: environment tests are currently disabled, due to the preprocessor
-# setup that we have now complicating the environment
-
-import unittest
-import sys
-import os
-import os.path
-import copy
-import signal
-import socket
-from isc.net.addr import IPAddr
-import time
-import isc
-import isc.log
-import isc.bind10.socket_cache
-import errno
-import random
-
-from isc.testutils.parse_args import TestOptParser, OptsError
-from isc.testutils.ccsession_mock import MockModuleCCSession
-
-class TestProcessInfo(unittest.TestCase):
- def setUp(self):
- # redirect stdout to a pipe so we can check that our
- # process spawning is doing the right thing with stdout
- self.old_stdout = os.dup(sys.stdout.fileno())
- self.pipes = os.pipe()
- os.dup2(self.pipes[1], sys.stdout.fileno())
- os.close(self.pipes[1])
- # note that we use dup2() to restore the original stdout
- # to the main program ASAP in each test... this prevents
- # hangs reading from the child process (as the pipe is only
- # open in the child), and also insures nice pretty output
-
- def tearDown(self):
- # clean up our stdout munging
- os.dup2(self.old_stdout, sys.stdout.fileno())
- os.close(self.pipes[0])
-
- def test_init(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- pi.spawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
-
-# def test_setting_env(self):
-# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
-# os.dup2(self.old_stdout, sys.stdout.fileno())
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
-# 'FOO': 'BAR' })
-
- def test_setting_null_stdout(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
- dev_null_stdout=True)
- pi.spawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- self.assertEqual(pi.dev_null_stdout, True)
- self.assertEqual(os.read(self.pipes[0], 100), b"")
-
- def test_respawn(self):
- pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
- pi.spawn()
- # wait for old process to work...
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- # respawn it
- old_pid = pi.pid
- pi.respawn()
- os.dup2(self.old_stdout, sys.stdout.fileno())
- # make sure the new one started properly
- self.assertEqual(pi.name, 'Test Process')
- self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
-# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
-# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
- self.assertEqual(pi.dev_null_stdout, False)
- self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
- self.assertNotEqual(pi.process, None)
- self.assertTrue(type(pi.pid) is int)
- self.assertNotEqual(pi.pid, old_pid)
-
-class TestCacheCommands(unittest.TestCase):
- """
- Test methods of boss related to the socket cache and socket handling.
- """
- def setUp(self):
- """
- Prepare the boss for some tests.
-
- Also prepare some variables we need.
- """
- self.__boss = BoB()
- # Fake the cache here so we can pretend it is us and hijack the
- # calls to its methods.
- self.__boss._socket_cache = self
- self.__boss._socket_path = '/socket/path'
- self.__raise_exception = None
- self.__socket_args = {
- "port": 53,
- "address": "::",
- "protocol": "UDP",
- "share_mode": "ANY",
- "share_name": "app"
- }
- # What was and wasn't called.
- self.__drop_app_called = None
- self.__get_socket_called = None
- self.__send_fd_called = None
- self.__get_token_called = None
- self.__drop_socket_called = None
- bind10_src.libutil_io_python.send_fd = self.__send_fd
-
- def __send_fd(self, to, socket):
- """
- A function to hook the send_fd in the bind10_src.
- """
- self.__send_fd_called = (to, socket)
-
- class FalseSocket:
- """
- A socket where we can fake methods we need instead of having a real
- socket.
- """
- def __init__(self):
- self.send = b""
- def fileno(self):
- """
- The file number. Used for identifying the remote application.
- """
- return 42
-
- def sendall(self, data):
- """
- Adds data to the self.send.
- """
- self.send += data
-
- def drop_application(self, application):
- """
- Part of pretending to be the cache. Logs the parameter to
- self.__drop_app_called.
-
- In the case self.__raise_exception is set, the exception there
- is raised instead.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__drop_app_called = application
-
- def test_consumer_dead(self):
- """
- Test that it calls the drop_application method of the cache.
- """
- self.__boss.socket_consumer_dead(self.FalseSocket())
- self.assertEqual(42, self.__drop_app_called)
-
- def test_consumer_dead_invalid(self):
- """
- Test that it doesn't crash in case the application is not known to
- the cache, the boss doesn't crash, as this actually can happen in
- practice.
- """
- self.__raise_exception = ValueError("This application is unknown")
- # This doesn't crash
- self.__boss.socket_consumer_dead(self.FalseSocket())
-
- def get_socket(self, token, application):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the call is logged
- into __get_socket_called and a number is returned.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__get_socket_called = (token, application)
- return 13
-
- def test_request_handler(self):
- """
- Test that a request for socket is forwarded and the socket is sent
- back, if it returns a socket.
- """
- socket = self.FalseSocket()
- # An exception from the cache
- self.__raise_exception = ValueError("Test value error")
- self.__boss.socket_request_handler(b"token", socket)
- # It was called, but it threw, so it is not noted here
- self.assertIsNone(self.__get_socket_called)
- self.assertEqual(b"0\n", socket.send)
- # It should not have sent any socket.
- self.assertIsNone(self.__send_fd_called)
- # Now prepare a valid scenario
- self.__raise_exception = None
- socket.send = b""
- self.__boss.socket_request_handler(b"token", socket)
- self.assertEqual(b"1\n", socket.send)
- self.assertEqual((42, 13), self.__send_fd_called)
- self.assertEqual(("token", 42), self.__get_socket_called)
-
- def get_token(self, protocol, address, port, share_mode, share_name):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the parameters are
- logged into __get_token_called and a token is returned.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__get_token_called = (protocol, address, port, share_mode,
- share_name)
- return "token"
-
- def test_get_socket_ok(self):
- """
- Test the successful scenario of getting a socket.
- """
- result = self.__boss._get_socket(self.__socket_args)
- [code, answer] = result['result']
- self.assertEqual(0, code)
- self.assertEqual({
- 'token': 'token',
- 'path': '/socket/path'
- }, answer)
- addr = self.__get_token_called[1]
- self.assertTrue(isinstance(addr, IPAddr))
- self.assertEqual("::", str(addr))
- self.assertEqual(("UDP", addr, 53, "ANY", "app"),
- self.__get_token_called)
-
- def test_get_socket_error(self):
- """
- Test that bad inputs are handled correctly, etc.
- """
- def check_code(code, args):
- """
- Pass the args there and check if it returns success or not.
-
- The rest is not tested, as it is already checked in the
- test_get_socket_ok.
- """
- [rcode, ranswer] = self.__boss._get_socket(args)['result']
- self.assertEqual(code, rcode)
- if code != 0:
- # This should be an error message. The exact formatting
- # is unknown, but we check it is string at least
- self.assertTrue(isinstance(ranswer, str))
-
- def mod_args(name, value):
- """
- Override a parameter in the args.
- """
- result = dict(self.__socket_args)
- result[name] = value
- return result
-
- # Port too large
- check_code(1, mod_args('port', 65536))
- # Not numeric address
- check_code(1, mod_args('address', 'example.org.'))
- # Some bad values of enum-like params
- check_code(1, mod_args('protocol', 'BAD PROTO'))
- check_code(1, mod_args('share_mode', 'BAD SHARE'))
- # Check missing parameters
- for param in self.__socket_args.keys():
- args = dict(self.__socket_args)
- del args[param]
- check_code(1, args)
- # These are OK values for the enum-like parameters
- # The ones from test_get_socket_ok are not tested here
- check_code(0, mod_args('protocol', 'TCP'))
- check_code(0, mod_args('share_mode', 'SAMEAPP'))
- check_code(0, mod_args('share_mode', 'NO'))
- # If an exception is raised from within the cache, it is converted
- # to an error, not propagated
- self.__raise_exception = Exception("Test exception")
- check_code(1, self.__socket_args)
- # The special "expected" exceptions
- self.__raise_exception = \
- isc.bind10.socket_cache.ShareError("Not shared")
- check_code(3, self.__socket_args)
- self.__raise_exception = \
- isc.bind10.socket_cache.SocketError("Not shared", 13)
- check_code(2, self.__socket_args)
-
- def drop_socket(self, token):
- """
- Part of pretending to be the cache. If there's anything in
- __raise_exception, it is raised. Otherwise, the parameter is stored
- in __drop_socket_called.
- """
- if self.__raise_exception is not None:
- raise self.__raise_exception
- self.__drop_socket_called = token
-
- def test_drop_socket(self):
- """
- Check the drop_socket command. It should directly call the method
- on the cache. Exceptions should be translated to error messages.
- """
- # This should be OK and just propagated to the call.
- self.assertEqual({"result": [0]},
- self.__boss.command_handler("drop_socket",
- {"token": "token"}))
- self.assertEqual("token", self.__drop_socket_called)
- self.__drop_socket_called = None
- # Missing parameter
- self.assertEqual({"result": [1, "Missing token parameter"]},
- self.__boss.command_handler("drop_socket", {}))
- self.assertIsNone(self.__drop_socket_called)
- # An exception is raised from within the cache
- self.__raise_exception = ValueError("Test error")
- self.assertEqual({"result": [1, "Test error"]},
- self.__boss.command_handler("drop_socket",
- {"token": "token"}))
-
-
-class TestBoB(unittest.TestCase):
- def setUp(self):
- # Save original values that may be tweaked in some tests
- self.__orig_setgid = bind10_src.posix.setgid
- self.__orig_setuid = bind10_src.posix.setuid
- self.__orig_logger_class = isc.log.Logger
-
- def tearDown(self):
- # Restore original values saved in setUp()
- bind10_src.posix.setgid = self.__orig_setgid
- bind10_src.posix.setuid = self.__orig_setuid
- isc.log.Logger = self.__orig_logger_class
-
- def test_init(self):
- bob = BoB()
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, None)
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.components, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.username, None)
- self.assertIsNone(bob._socket_cache)
-
- def __setgid(self, gid):
- self.__gid_set = gid
-
- def __setuid(self, uid):
- self.__uid_set = uid
-
- def test_change_user(self):
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = self.__setuid
-
- self.__gid_set = None
- self.__uid_set = None
- bob = BoB()
- bob.change_user()
- # No gid/uid set in boss, nothing called.
- self.assertIsNone(self.__gid_set)
- self.assertIsNone(self.__uid_set)
-
- BoB(setuid=42, setgid=4200).change_user()
- # This time, it get's called
- self.assertEqual(4200, self.__gid_set)
- self.assertEqual(42, self.__uid_set)
-
- def raising_set_xid(gid_or_uid):
- ex = OSError()
- ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
- raise ex
-
- # Let setgid raise an exception
- bind10_src.posix.setgid = raising_set_xid
- bind10_src.posix.setuid = self.__setuid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- # Let setuid raise an exception
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = raising_set_xid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- # Let initial log output after setuid raise an exception
- bind10_src.posix.setgid = self.__setgid
- bind10_src.posix.setuid = self.__setuid
- isc.log.Logger = raising_set_xid
- self.assertRaises(bind10_src.ChangeUserError,
- BoB(setuid=42, setgid=4200).change_user)
-
- def test_set_creator(self):
- """
- Test the call to set_creator. First time, the cache is created
- with the passed creator. The next time, it throws an exception.
- """
- bob = BoB()
- # The cache doesn't use it at start, so just create an empty class
- class Creator: pass
- creator = Creator()
- bob.set_creator(creator)
- self.assertTrue(isinstance(bob._socket_cache,
- isc.bind10.socket_cache.Cache))
- self.assertEqual(creator, bob._socket_cache._creator)
- self.assertRaises(ValueError, bob.set_creator, creator)
-
- def test_socket_srv(self):
- """Tests init_socket_srv() and remove_socket_srv() work as expected."""
- bob = BoB()
-
- self.assertIsNone(bob._srv_socket)
- self.assertIsNone(bob._tmpdir)
- self.assertIsNone(bob._socket_path)
-
- bob.init_socket_srv()
-
- self.assertIsNotNone(bob._srv_socket)
- self.assertNotEqual(-1, bob._srv_socket.fileno())
- self.assertEqual(os.path.join(bob._tmpdir, 'sockcreator'),
- bob._srv_socket.getsockname())
-
- self.assertIsNotNone(bob._tmpdir)
- self.assertTrue(os.path.isdir(bob._tmpdir))
- self.assertIsNotNone(bob._socket_path)
- self.assertTrue(os.path.exists(bob._socket_path))
-
- # Check that it's possible to connect to the socket file (this
- # only works if the socket file exists and the server listens on
- # it).
- s = socket.socket(socket.AF_UNIX)
- try:
- s.connect(bob._socket_path)
- can_connect = True
- s.close()
- except socket.error as e:
- can_connect = False
-
- self.assertTrue(can_connect)
-
- bob.remove_socket_srv()
-
- self.assertEqual(-1, bob._srv_socket.fileno())
- self.assertFalse(os.path.exists(bob._socket_path))
- self.assertFalse(os.path.isdir(bob._tmpdir))
-
- # These should not fail either:
-
- # second call
- bob.remove_socket_srv()
-
- bob._srv_socket = None
- bob.remove_socket_srv()
-
- def test_init_alternate_socket(self):
- bob = BoB("alt_socket_file")
- self.assertEqual(bob.verbose, False)
- self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
- self.assertEqual(bob.cc_session, None)
- self.assertEqual(bob.ccs, None)
- self.assertEqual(bob.components, {})
- self.assertEqual(bob.runnable, False)
- self.assertEqual(bob.username, None)
-
- def test_command_handler(self):
- class DummySession():
- def group_sendmsg(self, msg, group):
- (self.msg, self.group) = (msg, group)
- def group_recvmsg(self, nonblock, seq): pass
- class DummyModuleCCSession():
- module_spec = isc.config.module_spec.ModuleSpec({
- "module_name": "Boss",
- "statistics": [
- {
- "item_name": "boot_time",
- "item_type": "string",
- "item_optional": False,
- "item_default": "1970-01-01T00:00:00Z",
- "item_title": "Boot time",
- "item_description": "A date time when bind10 process starts initially",
- "item_format": "date-time"
- }
- ]
- })
- def get_module_spec(self):
- return self.module_spec
- bob = BoB()
- bob.verbose = True
- bob.cc_session = DummySession()
- bob.ccs = DummyModuleCCSession()
- # a bad command
- self.assertEqual(bob.command_handler(-1, None),
- isc.config.ccsession.create_answer(1, "bad command"))
- # "shutdown" command
- self.assertEqual(bob.command_handler("shutdown", None),
- isc.config.ccsession.create_answer(0))
- self.assertFalse(bob.runnable)
- # "getstats" command
- self.assertEqual(bob.command_handler("getstats", None),
- isc.config.ccsession.create_answer(0,
- { 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME) }))
- # "ping" command
- self.assertEqual(bob.command_handler("ping", None),
- isc.config.ccsession.create_answer(0, "pong"))
- # "show_processes" command
- self.assertEqual(bob.command_handler("show_processes", None),
- isc.config.ccsession.create_answer(0,
- bob.get_processes()))
- # an unknown command
- self.assertEqual(bob.command_handler("__UNKNOWN__", None),
- isc.config.ccsession.create_answer(1, "Unknown command"))
-
- # Fake the get_token of cache and test the command works
- bob._socket_path = '/socket/path'
- class cache:
- def get_token(self, protocol, addr, port, share_mode, share_name):
- return str(addr) + ':' + str(port)
- bob._socket_cache = cache()
- args = {
- "port": 53,
- "address": "0.0.0.0",
- "protocol": "UDP",
- "share_mode": "ANY",
- "share_name": "app"
- }
- # at all and this is the easiest way to check.
- self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
- 'path': '/socket/path'}]},
- bob.command_handler("get_socket", args))
- # The drop_socket is not tested here, but in TestCacheCommands.
- # It needs the cache mocks to be in place and they are there.
-
- def test_stop_process(self):
- """
- Test checking the stop_process method sends the right message over
- the message bus.
- """
- class DummySession():
- def group_sendmsg(self, msg, group, instance="*"):
- (self.msg, self.group, self.instance) = (msg, group, instance)
- bob = BoB()
- bob.cc_session = DummySession()
- bob.stop_process('process', 'address', 42)
- self.assertEqual('address', bob.cc_session.group)
- self.assertEqual('address', bob.cc_session.instance)
- self.assertEqual({'command': ['shutdown', {'pid': 42}]},
- bob.cc_session.msg)
-
-# Mock class for testing BoB's usage of ProcessInfo
-class MockProcessInfo:
- def __init__(self, name, args, env={}, dev_null_stdout=False,
- dev_null_stderr=False):
- self.name = name
- self.args = args
- self.env = env
- self.dev_null_stdout = dev_null_stdout
- self.dev_null_stderr = dev_null_stderr
- self.process = None
- self.pid = None
-
- def spawn(self):
- # set some pid (only used for testing that it is not None anymore)
- self.pid = 42147
-
-# Class for testing the BoB without actually starting processes.
-# This is used for testing the start/stop components routines and
-# the BoB commands.
-#
-# Testing that external processes start is outside the scope
-# of the unit test, by overriding the process start methods we can check
-# that the right processes are started depending on the configuration
-# options.
-class MockBob(BoB):
- def __init__(self):
- BoB.__init__(self)
-
- # Set flags as to which of the overridden methods has been run.
- self.msgq = False
- self.cfgmgr = False
- self.ccsession = False
- self.auth = False
- self.resolver = False
- self.xfrout = False
- self.xfrin = False
- self.zonemgr = False
- self.stats = False
- self.stats_httpd = False
- self.cmdctl = False
- self.dhcp6 = False
- self.dhcp4 = False
- self.c_channel_env = {}
- self.components = { }
- self.creator = False
- self.get_process_exit_status_called = False
-
- class MockSockCreator(isc.bind10.component.Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- isc.bind10.component.Component.__init__(self, process, boss,
- kind, 'SockCreator')
- self._start_func = boss.start_creator
-
- specials = isc.bind10.special_component.get_specials()
- specials['sockcreator'] = MockSockCreator
- self._component_configurator = \
- isc.bind10.component.Configurator(self, specials)
-
- def start_creator(self):
- self.creator = True
- procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
- procinfo.pid = 1
- return procinfo
-
- def _read_bind10_config(self):
- # Configuration options are set directly
- pass
-
- def start_msgq(self):
- self.msgq = True
- procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
- procinfo.pid = 2
- return procinfo
-
- def start_ccsession(self, c_channel_env):
- # this is not a process, don't have to do anything with procinfo
- self.ccsession = True
-
- def start_cfgmgr(self):
- self.cfgmgr = True
- procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
- procinfo.pid = 3
- return procinfo
-
- def start_auth(self):
- self.auth = True
- procinfo = ProcessInfo('b10-auth', ['/bin/false'])
- procinfo.pid = 5
- return procinfo
-
- def start_resolver(self):
- self.resolver = True
- procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
- procinfo.pid = 6
- return procinfo
-
- def start_simple(self, name):
- procmap = { 'b10-zonemgr': self.start_zonemgr,
- 'b10-stats': self.start_stats,
- 'b10-stats-httpd': self.start_stats_httpd,
- 'b10-cmdctl': self.start_cmdctl,
- 'b10-dhcp6': self.start_dhcp6,
- 'b10-dhcp4': self.start_dhcp4,
- 'b10-xfrin': self.start_xfrin,
- 'b10-xfrout': self.start_xfrout }
- return procmap[name]()
-
- def start_xfrout(self):
- self.xfrout = True
- procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
- procinfo.pid = 7
- return procinfo
-
- def start_xfrin(self):
- self.xfrin = True
- procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
- procinfo.pid = 8
- return procinfo
-
- def start_zonemgr(self):
- self.zonemgr = True
- procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
- procinfo.pid = 9
- return procinfo
-
- def start_stats(self):
- self.stats = True
- procinfo = ProcessInfo('b10-stats', ['/bin/false'])
- procinfo.pid = 10
- return procinfo
-
- def start_stats_httpd(self):
- self.stats_httpd = True
- procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
- procinfo.pid = 11
- return procinfo
-
- def start_cmdctl(self):
- self.cmdctl = True
- procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
- procinfo.pid = 12
- return procinfo
-
- def start_dhcp6(self):
- self.dhcp6 = True
- procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
- procinfo.pid = 13
- return procinfo
-
- def start_dhcp4(self):
- self.dhcp4 = True
- procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
- procinfo.pid = 14
- return procinfo
-
- def stop_process(self, process, recipient, pid):
- procmap = { 'b10-auth': self.stop_auth,
- 'b10-resolver': self.stop_resolver,
- 'b10-xfrout': self.stop_xfrout,
- 'b10-xfrin': self.stop_xfrin,
- 'b10-zonemgr': self.stop_zonemgr,
- 'b10-stats': self.stop_stats,
- 'b10-stats-httpd': self.stop_stats_httpd,
- 'b10-cmdctl': self.stop_cmdctl }
- procmap[process]()
-
- # Some functions to pretend we stop processes, use by stop_process
- def stop_msgq(self):
- if self.msgq:
- del self.components[2]
- self.msgq = False
-
- def stop_cfgmgr(self):
- if self.cfgmgr:
- del self.components[3]
- self.cfgmgr = False
-
- def stop_auth(self):
- if self.auth:
- del self.components[5]
- self.auth = False
-
- def stop_resolver(self):
- if self.resolver:
- del self.components[6]
- self.resolver = False
-
- def stop_xfrout(self):
- if self.xfrout:
- del self.components[7]
- self.xfrout = False
-
- def stop_xfrin(self):
- if self.xfrin:
- del self.components[8]
- self.xfrin = False
-
- def stop_zonemgr(self):
- if self.zonemgr:
- del self.components[9]
- self.zonemgr = False
-
- def stop_stats(self):
- if self.stats:
- del self.components[10]
- self.stats = False
-
- def stop_stats_httpd(self):
- if self.stats_httpd:
- del self.components[11]
- self.stats_httpd = False
-
- def stop_cmdctl(self):
- if self.cmdctl:
- del self.components[12]
- self.cmdctl = False
-
- def _get_process_exit_status(self):
- if self.get_process_exit_status_called:
- return (0, 0)
- self.get_process_exit_status_called = True
- return (53, 0)
-
- def _get_process_exit_status_unknown_pid(self):
- if self.get_process_exit_status_called:
- return (0, 0)
- self.get_process_exit_status_called = True
- return (42, 0)
-
- def _get_process_exit_status_raises_oserror_echild(self):
- raise OSError(errno.ECHILD, 'Mock error')
-
- def _get_process_exit_status_raises_oserror_other(self):
- raise OSError(0, 'Mock error')
-
- def _get_process_exit_status_raises_other(self):
- raise Exception('Mock error')
-
- def _make_mock_process_info(self, name, args, c_channel_env,
- dev_null_stdout=False, dev_null_stderr=False):
- return MockProcessInfo(name, args, c_channel_env,
- dev_null_stdout, dev_null_stderr)
-
-class MockBobSimple(BoB):
- def __init__(self):
- BoB.__init__(self)
- # Set which process has been started
- self.started_process_name = None
- self.started_process_args = None
- self.started_process_env = None
-
- def _make_mock_process_info(self, name, args, c_channel_env,
- dev_null_stdout=False, dev_null_stderr=False):
- return MockProcessInfo(name, args, c_channel_env,
- dev_null_stdout, dev_null_stderr)
-
- def start_process(self, name, args, c_channel_env, port=None,
- address=None):
- self.started_process_name = name
- self.started_process_args = args
- self.started_process_env = c_channel_env
- return None
-
-class TestStartStopProcessesBob(unittest.TestCase):
- """
- Check that the start_all_components method starts the right combination
- of components and that the right components are started and stopped
- according to changes in configuration.
- """
- def check_environment_unchanged(self):
- # Check whether the environment has not been changed
- self.assertEqual(original_os_environ, os.environ)
-
- def check_started(self, bob, core, auth, resolver):
- """
- Check that the right sets of services are started. The ones that
- should be running are specified by the core, auth and resolver parameters
- (they are groups of processes, eg. auth means b10-auth, -xfrout, -xfrin
- and -zonemgr).
- """
- self.assertEqual(bob.msgq, core)
- self.assertEqual(bob.cfgmgr, core)
- self.assertEqual(bob.ccsession, core)
- self.assertEqual(bob.creator, core)
- self.assertEqual(bob.auth, auth)
- self.assertEqual(bob.resolver, resolver)
- self.assertEqual(bob.xfrout, auth)
- self.assertEqual(bob.xfrin, auth)
- self.assertEqual(bob.zonemgr, auth)
- self.assertEqual(bob.stats, core)
- self.assertEqual(bob.stats_httpd, core)
- self.assertEqual(bob.cmdctl, core)
- self.check_environment_unchanged()
-
- def check_preconditions(self, bob):
- self.check_started(bob, False, False, False)
-
- def check_started_none(self, bob):
- """
- Check that the situation is according to configuration where no servers
- should be started. Some components still need to be running.
- """
- self.check_started(bob, True, False, False)
- self.check_environment_unchanged()
-
- def check_started_both(self, bob):
- """
- Check the situation is according to configuration where both servers
- (auth and resolver) are enabled.
- """
- self.check_started(bob, True, True, True)
- self.check_environment_unchanged()
-
- def check_started_auth(self, bob):
- """
- Check the set of components needed to run auth only is started.
- """
- self.check_started(bob, True, True, False)
- self.check_environment_unchanged()
-
- def check_started_resolver(self, bob):
- """
- Check the set of components needed to run resolver only is started.
- """
- self.check_started(bob, True, False, True)
- self.check_environment_unchanged()
-
- def check_started_dhcp(self, bob, v4, v6):
- """
- Check if proper combinations of DHCPv4 and DHCpv6 can be started
- """
- self.assertEqual(v4, bob.dhcp4)
- self.assertEqual(v6, bob.dhcp6)
- self.check_environment_unchanged()
-
- def construct_config(self, start_auth, start_resolver):
- # The things that are common, not turned on an off
- config = {}
- config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
- config['b10-stats-httpd'] = { 'kind': 'dispensable',
- 'address': 'StatsHttpd' }
- config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
- if start_auth:
- config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
- config['b10-xfrout'] = { 'kind': 'dispensable',
- 'address': 'Xfrout' }
- config['b10-xfrin'] = { 'kind': 'dispensable',
- 'address': 'Xfrin' }
- config['b10-zonemgr'] = { 'kind': 'dispensable',
- 'address': 'Zonemgr' }
- if start_resolver:
- config['b10-resolver'] = { 'kind': 'needed',
- 'special': 'resolver' }
- return {'components': config}
-
- def config_start_init(self, start_auth, start_resolver):
- """
- Test the configuration is loaded at the startup.
- """
- bob = MockBob()
- config = self.construct_config(start_auth, start_resolver)
- class CC:
- def get_full_config(self):
- return config
- # Provide the fake CC with data
- bob.ccs = CC()
- # And make sure it's not overwritten
- def start_ccsession():
- bob.ccsession = True
- bob.start_ccsession = lambda _: start_ccsession()
- # We need to return the original _read_bind10_config
- bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
- bob.start_all_components()
- self.check_started(bob, True, start_auth, start_resolver)
- self.check_environment_unchanged()
-
- def test_start_none(self):
- self.config_start_init(False, False)
-
- def test_start_resolver(self):
- self.config_start_init(False, True)
-
- def test_start_auth(self):
- self.config_start_init(True, False)
-
- def test_start_both(self):
- self.config_start_init(True, True)
-
- def test_config_start(self):
- """
- Test that the configuration starts and stops components according
- to configuration changes.
- """
-
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
- bob.runnable = True
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Enable both at once
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_both(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- # Turn them both off again
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Not touched by empty change
- bob.config_handler({})
- self.check_started_none(bob)
-
- # Not touched by change to the same configuration
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Start and stop auth separately
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Start and stop resolver separately
- bob.config_handler(self.construct_config(False, True))
- self.check_started_resolver(bob)
-
- bob.config_handler(self.construct_config(False, False))
- self.check_started_none(bob)
-
- # Alternate
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- bob.config_handler(self.construct_config(False, True))
- self.check_started_resolver(bob)
-
- bob.config_handler(self.construct_config(True, False))
- self.check_started_auth(bob)
-
- def test_config_start_once(self):
- """
- Tests that a component is started only once.
- """
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
-
- bob.runnable = True
- bob.config_handler(self.construct_config(True, True))
- self.check_started_both(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- # Send again we want to start them. Should not do it, as they are.
- bob.config_handler(self.construct_config(True, True))
-
- def test_config_not_started_early(self):
- """
- Test that components are not started by the config handler before
- startup.
- """
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_auth = lambda: self.fail("Started auth again")
- bob.start_xfrout = lambda: self.fail("Started xfrout again")
- bob.start_xfrin = lambda: self.fail("Started xfrin again")
- bob.start_zonemgr = lambda: self.fail("Started zonemgr again")
- bob.start_resolver = lambda: self.fail("Started resolver again")
-
- bob.config_handler({'start_auth': True, 'start_resolver': True})
-
- # Checks that DHCP (v4 and v6) components are started when expected
- def test_start_dhcp(self):
-
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
-
- bob.start_all_components()
- bob.config_handler(self.construct_config(False, False))
- self.check_started_dhcp(bob, False, False)
-
- def test_start_dhcp_v6only(self):
- # Create BoB and ensure correct initialization
- bob = MockBob()
- self.check_preconditions(bob)
- # v6 only enabled
- bob.start_all_components()
- bob.runnable = True
- bob._BoB_started = True
- config = self.construct_config(False, False)
- config['components']['b10-dhcp6'] = { 'kind': 'needed',
- 'address': 'Dhcp6' }
- bob.config_handler(config)
- self.check_started_dhcp(bob, False, True)
-
- # uncomment when dhcpv4 becomes implemented
- # v4 only enabled
- #bob.cfg_start_dhcp6 = False
- #bob.cfg_start_dhcp4 = True
- #self.check_started_dhcp(bob, True, False)
-
- # both v4 and v6 enabled
- #bob.cfg_start_dhcp6 = True
- #bob.cfg_start_dhcp4 = True
- #self.check_started_dhcp(bob, True, True)
-
-class MockComponent:
- def __init__(self, name, pid, address=None):
- self.name = lambda: name
- self.pid = lambda: pid
- self.address = lambda: address
- self.restarted = False
- self.forceful = False
- self.running = True
- self.has_failed = False
-
- def get_restart_time(self):
- return 0 # arbitrary dummy value
-
- def restart(self, now):
- self.restarted = True
- return True
-
- def is_running(self):
- return self.running
-
- def failed(self, status):
- return self.has_failed
-
- def kill(self, forceful):
- self.forceful = forceful
-
-class TestBossCmd(unittest.TestCase):
- def test_ping(self):
- """
- Confirm simple ping command works.
- """
- bob = MockBob()
- answer = bob.command_handler("ping", None)
- self.assertEqual(answer, {'result': [0, 'pong']})
-
- def test_show_processes_empty(self):
- """
- Confirm getting a list of processes works.
- """
- bob = MockBob()
- answer = bob.command_handler("show_processes", None)
- self.assertEqual(answer, {'result': [0, []]})
-
- def test_show_processes(self):
- """
- Confirm getting a list of processes works.
- """
- bob = MockBob()
- bob.register_process(1, MockComponent('first', 1))
- bob.register_process(2, MockComponent('second', 2, 'Second'))
- answer = bob.command_handler("show_processes", None)
- processes = [[1, 'first', None],
- [2, 'second', 'Second']]
- self.assertEqual(answer, {'result': [0, processes]})
-
-class TestParseArgs(unittest.TestCase):
- """
- This tests parsing of arguments of the bind10 master process.
- """
- #TODO: Write tests for the original parsing, bad options, etc.
- def test_no_opts(self):
- """
- Test correct default values when no options are passed.
- """
- options = parse_args([], TestOptParser)
- self.assertEqual(None, options.data_path)
- self.assertEqual(None, options.config_file)
- self.assertEqual(None, options.cmdctl_port)
-
- def test_data_path(self):
- """
- Test it can parse the data path.
- """
- self.assertRaises(OptsError, parse_args, ['-p'], TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--data-path'],
- TestOptParser)
- options = parse_args(['-p', '/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
- options = parse_args(['--data-path=/data/path'], TestOptParser)
- self.assertEqual('/data/path', options.data_path)
-
- def test_config_filename(self):
- """
- Test it can parse the config switch.
- """
- self.assertRaises(OptsError, parse_args, ['-c'], TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--config-file'],
- TestOptParser)
- options = parse_args(['-c', 'config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
- options = parse_args(['--config-file=config-file'], TestOptParser)
- self.assertEqual('config-file', options.config_file)
-
- def test_clear_config(self):
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.clear_config)
- options = parse_args(['--clear-config'], TestOptParser)
- self.assertEqual(True, options.clear_config)
-
- def test_nokill(self):
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.nokill)
- options = parse_args(['--no-kill'], TestOptParser)
- self.assertEqual(True, options.nokill)
- options = parse_args([], TestOptParser)
- self.assertEqual(False, options.nokill)
- options = parse_args(['-i'], TestOptParser)
- self.assertEqual(True, options.nokill)
-
- def test_cmdctl_port(self):
- """
- Test it can parse the command control port.
- """
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
- TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
- TestOptParser)
- self.assertRaises(OptsError, parse_args, ['--cmdctl-port'],
- TestOptParser)
- options = parse_args(['--cmdctl-port=1234'], TestOptParser)
- self.assertEqual(1234, options.cmdctl_port)
-
-class TestPIDFile(unittest.TestCase):
- def setUp(self):
- self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
- if os.path.exists(self.pid_file):
- os.unlink(self.pid_file)
-
- def tearDown(self):
- if os.path.exists(self.pid_file):
- os.unlink(self.pid_file)
-
- def check_pid_file(self):
- # dump PID to the file, and confirm the content is correct
- dump_pid(self.pid_file)
- my_pid = os.getpid()
- with open(self.pid_file, "r") as f:
- self.assertEqual(my_pid, int(f.read()))
-
- def test_dump_pid(self):
- self.check_pid_file()
-
- # make sure any existing content will be removed
- with open(self.pid_file, "w") as f:
- f.write('dummy data\n')
- self.check_pid_file()
-
- def test_unlink_pid_file_notexist(self):
- dummy_data = 'dummy_data\n'
-
- with open(self.pid_file, "w") as f:
- f.write(dummy_data)
-
- unlink_pid_file("no_such_pid_file")
-
- # the file specified for unlink_pid_file doesn't exist,
- # and the original content of the file should be intact.
- with open(self.pid_file, "r") as f:
- self.assertEqual(dummy_data, f.read())
-
- def test_dump_pid_with_none(self):
- # Check the behavior of dump_pid() and unlink_pid_file() with None.
- # This should be no-op.
- dump_pid(None)
- self.assertFalse(os.path.exists(self.pid_file))
-
- dummy_data = 'dummy_data\n'
-
- with open(self.pid_file, "w") as f:
- f.write(dummy_data)
-
- unlink_pid_file(None)
-
- with open(self.pid_file, "r") as f:
- self.assertEqual(dummy_data, f.read())
-
- def test_dump_pid_failure(self):
- # the attempt to open file will fail, which should result in exception.
- self.assertRaises(IOError, dump_pid,
- 'nonexistent_dir' + os.sep + 'bind10.pid')
-
-class TestBossComponents(unittest.TestCase):
- """
- Test the boss propagates component configuration properly to the
- component configurator and acts sane.
- """
- def setUp(self):
- self.__param = None
- self.__called = False
- self.__compconfig = {
- 'comp': {
- 'kind': 'needed',
- 'process': 'cat'
- }
- }
- self._tmp_time = None
- self._tmp_sleep = None
- self._tmp_module_cc_session = None
- self._tmp_cc_session = None
-
- def tearDown(self):
- if self._tmp_time is not None:
- time.time = self._tmp_time
- if self._tmp_sleep is not None:
- time.sleep = self._tmp_sleep
- if self._tmp_module_cc_session is not None:
- isc.config.ModuleCCSession = self._tmp_module_cc_session
- if self._tmp_cc_session is not None:
- isc.cc.Session = self._tmp_cc_session
-
- def __unary_hook(self, param):
- """
- A hook function that stores the parameter for later examination.
- """
- self.__param = param
-
- def __nullary_hook(self):
- """
- A hook function that notes down it was called.
- """
- self.__called = True
-
- def __check_core(self, config):
- """
- A function checking that the config contains parts for the valid
- core component configuration.
- """
- self.assertIsNotNone(config)
- for component in ['sockcreator', 'msgq', 'cfgmgr']:
- self.assertTrue(component in config)
- self.assertEqual(component, config[component]['special'])
- self.assertEqual('core', config[component]['kind'])
-
- def __check_extended(self, config):
- """
- This checks that the config contains the core and one more component.
- """
- self.__check_core(config)
- self.assertTrue('comp' in config)
- self.assertEqual('cat', config['comp']['process'])
- self.assertEqual('needed', config['comp']['kind'])
- self.assertEqual(4, len(config))
-
- def test_correct_run(self):
- """
- Test the situation when we run in usual scenario, nothing fails,
- we just start, reconfigure and then stop peacefully.
- """
- bob = MockBob()
- # Start it
- orig = bob._component_configurator.startup
- bob._component_configurator.startup = self.__unary_hook
- bob.start_all_components()
- bob._component_configurator.startup = orig
- self.__check_core(self.__param)
- self.assertEqual(3, len(self.__param))
-
- # Reconfigure it
- self.__param = None
- orig = bob._component_configurator.reconfigure
- bob._component_configurator.reconfigure = self.__unary_hook
- # Otherwise it does not work
- bob.runnable = True
- bob.config_handler({'components': self.__compconfig})
- self.__check_extended(self.__param)
- currconfig = self.__param
- # If we reconfigure it, but it does not contain the components part,
- # nothing is called
- bob.config_handler({})
- self.assertEqual(self.__param, currconfig)
- self.__param = None
- bob._component_configurator.reconfigure = orig
- # Check a configuration that messes up the core components is rejected.
- compconf = dict(self.__compconfig)
- compconf['msgq'] = { 'process': 'echo' }
- result = bob.config_handler({'components': compconf})
- # Check it rejected it
- self.assertEqual(1, result['result'][0])
-
- # We can't call shutdown, that one relies on the stuff in main
- # We check somewhere else that the shutdown is actually called
- # from there (the test_kills).
-
- def __real_test_kill(self, nokill=False, ex_on_kill=None):
- """
- Helper function that does the actual kill functionality testing.
- """
- bob = MockBob()
- bob.nokill = nokill
-
- killed = []
- class ImmortalComponent:
- """
- An immortal component. It does not stop when it is told so
- (anyway it is not told so). It does not die if it is killed
- the first time. It dies only when killed forcefully.
- """
- def __init__(self):
- # number of kill() calls, preventing infinite loop.
- self.__call_count = 0
-
- def kill(self, forceful=False):
- self.__call_count += 1
- if self.__call_count > 2:
- raise Exception('Too many calls to ImmortalComponent.kill')
-
- killed.append(forceful)
- if ex_on_kill is not None:
- # If exception is given by the test, raise it here.
- # In the case of ESRCH, the process should have gone
- # somehow, so we clear the components.
- if ex_on_kill.errno == errno.ESRCH:
- bob.components = {}
- raise ex_on_kill
- if forceful:
- bob.components = {}
- def pid(self):
- return 1
- def name(self):
- return "Immortal"
- bob.components = {}
- bob.register_process(1, ImmortalComponent())
-
- # While at it, we check the configurator shutdown is actually called
- orig = bob._component_configurator.shutdown
- bob._component_configurator.shutdown = self.__nullary_hook
- self.__called = False
-
- bob.ccs = MockModuleCCSession()
- self.assertFalse(bob.ccs.stopped)
-
- bob.shutdown()
-
- self.assertTrue(bob.ccs.stopped)
-
- # Here, killed is a list to which False is appended if SIGTERM
- # should be sent, or True if SIGKILL should be sent, in the order
- # in which they're sent.
- if nokill:
- self.assertEqual([], killed)
- else:
- if ex_on_kill is not None:
- self.assertEqual([False], killed)
- else:
- self.assertEqual([False, True], killed)
-
- self.assertTrue(self.__called)
-
- bob._component_configurator.shutdown = orig
-
- def test_kills(self):
- """
- Test that the boss kills components which don't want to stop.
- """
- self.__real_test_kill()
-
- def test_kill_fail(self):
- """Test cases where kill() results in an exception due to OS error.
-
- The behavior should be different for ESRCH and EPERM, so we test both cases.
-
- """
-
- ex = OSError()
- ex.errno, ex.strerror = errno.ESRCH, 'No such process'
- self.__real_test_kill(ex_on_kill=ex)
-
- ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
- self.__real_test_kill(ex_on_kill=ex)
-
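The expectations above encode a common escalation pattern for stopping stubborn children: a non-forceful kill (SIGTERM) first, a forceful one (SIGKILL) if that is ignored, and ESRCH treated as "the process is already gone". Below is a minimal, self-contained sketch of that pattern; the component objects and their kill(forceful=...) signature mirror the mocks in these tests, not BoB's actual implementation.

    import errno
    import time

    def stop_components(components, grace=0.5):
        """Illustrative sketch: ask each component to stop with a
        non-forceful kill (SIGTERM), wait a little, then escalate to a
        forceful kill (SIGKILL) for whatever is still registered.
        'components' is assumed to map pid -> object with
        kill(forceful=False)."""
        for component in list(components.values()):
            try:
                component.kill(forceful=False)
            except OSError as ex:
                if ex.errno != errno.ESRCH:
                    raise         # ESRCH just means it already exited
        time.sleep(grace)         # give the children a moment to exit
        for component in list(components.values()):
            try:
                component.kill(forceful=True)
            except OSError as ex:
                if ex.errno != errno.ESRCH:
                    raise
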
- def test_nokill(self):
- """
- Test that the boss *doesn't* kill components which don't want to
- stop, when asked not to (by passing the --no-kill option which
- sets bob.nokill to True).
- """
- self.__real_test_kill(True)
-
- def test_component_shutdown(self):
- """
- Test that component_shutdown sets all variables accordingly.
- """
- bob = MockBob()
- self.assertRaises(Exception, bob.component_shutdown, 1)
- self.assertEqual(1, bob.exitcode)
- bob._BoB__started = True
- bob.component_shutdown(2)
- self.assertEqual(2, bob.exitcode)
- self.assertFalse(bob.runnable)
-
- def test_init_config(self):
- """
- Test initial configuration is loaded.
- """
- bob = MockBob()
- # Start it
- bob._component_configurator.reconfigure = self.__unary_hook
- # We need to restore the original _read_bind10_config
- bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
- # And provide a session to read the data from
- class CC:
- pass
- bob.ccs = CC()
- bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
- bob.start_all_components()
- self.__check_extended(self.__param)
-
- def __setup_restart(self, bob, component):
- '''Common procedure for restarting a component used below.'''
- bob.components_to_restart = { component }
- component.restarted = False
- bob.restart_processes()
-
- def test_restart_processes(self):
- '''Check some behavior on restarting processes.'''
- bob = MockBob()
- bob.runnable = True
- component = MockComponent('test', 53)
-
- # A component scheduled for restart will actually be restarted only if
- # it's in the configurator's configuration.
- # We poke at the configurator's internals directly below; ugly, but the
- # easiest way for the test.
- bob._component_configurator._components['test'] = (None, component)
- self.__setup_restart(bob, component)
- self.assertTrue(component.restarted)
- self.assertNotIn(component, bob.components_to_restart)
-
- # Remove the component from the configuration. It won't be restarted
- # even if scheduled, nor will it remain in the to-be-restarted list.
- del bob._component_configurator._components['test']
- self.__setup_restart(bob, component)
- self.assertFalse(component.restarted)
- self.assertNotIn(component, bob.components_to_restart)
-
- def test_get_processes(self):
- '''Test that processes are returned correctly, sorted by pid.'''
- bob = MockBob()
-
- pids = list(range(0, 20))
- random.shuffle(pids)
-
- for i in range(0, 20):
- pid = pids[i]
- component = MockComponent('test' + str(pid), pid,
- 'Test' + str(pid))
- bob.components[pid] = component
-
- process_list = bob.get_processes()
- self.assertEqual(20, len(process_list))
-
- last_pid = -1
- for process in process_list:
- pid = process[0]
- self.assertLessEqual(last_pid, pid)
- last_pid = pid
- self.assertEqual([pid, 'test' + str(pid), 'Test' + str(pid)],
- process)
-
- def _test_reap_children_helper(self, runnable, is_running, failed):
- '''Construct a BoB instance, set various data in it according to
- passed args and check if the component was added to the list of
- components to restart.'''
- bob = MockBob()
- bob.runnable = runnable
-
- component = MockComponent('test', 53)
- component.running = is_running
- component.has_failed = failed
- bob.components[53] = component
-
- self.assertNotIn(component, bob.components_to_restart)
-
- bob.reap_children()
-
- if runnable and is_running and not failed:
- self.assertIn(component, bob.components_to_restart)
- else:
- self.assertEqual([], bob.components_to_restart)
-
- def test_reap_children(self):
- '''Test that children are queued to be restarted when they ask for it.'''
- # test various combinations of 3 booleans
- # (BoB.runnable, component.is_running(), component.failed())
- self._test_reap_children_helper(False, False, False)
- self._test_reap_children_helper(False, False, True)
- self._test_reap_children_helper(False, True, False)
- self._test_reap_children_helper(False, True, True)
- self._test_reap_children_helper(True, False, False)
- self._test_reap_children_helper(True, False, True)
- self._test_reap_children_helper(True, True, False)
- self._test_reap_children_helper(True, True, True)
-
- # setup for more tests below
- bob = MockBob()
- bob.runnable = True
- component = MockComponent('test', 53)
- bob.components[53] = component
-
- # case where the returned pid is unknown to us. nothing should
- # happen then.
- bob.get_process_exit_status_called = False
- bob._get_process_exit_status = bob._get_process_exit_status_unknown_pid
- bob.components_to_restart = []
- # this should do nothing as the pid is unknown
- bob.reap_children()
- self.assertEqual([], bob.components_to_restart)
-
- # case where bob._get_process_exit_status() raises OSError with
- # errno.ECHILD
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_oserror_echild
- bob.components_to_restart = []
- # this should catch and handle the OSError
- bob.reap_children()
- self.assertEqual([], bob.components_to_restart)
-
- # case where bob._get_process_exit_status() raises OSError with
- # errno other than ECHILD
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_oserror_other
- with self.assertRaises(OSError):
- bob.reap_children()
-
- # case where bob._get_process_exit_status() raises something
- # other than OSError
- bob._get_process_exit_status = \
- bob._get_process_exit_status_raises_other
- with self.assertRaises(Exception):
- bob.reap_children()
-
- def test_kill_started_components(self):
- '''Test that started components are killed.'''
- bob = MockBob()
-
- component = MockComponent('test', 53, 'Test')
- bob.components[53] = component
-
- self.assertEqual([[53, 'test', 'Test']], bob.get_processes())
- bob.kill_started_components()
- self.assertEqual([], bob.get_processes())
- self.assertTrue(component.forceful)
-
- def _start_msgq_helper(self, bob, verbose):
- bob.verbose = verbose
- pi = bob.start_msgq()
- self.assertEqual('b10-msgq', pi.name)
- self.assertEqual(['b10-msgq'], pi.args)
- self.assertTrue(pi.dev_null_stdout)
- self.assertEqual(pi.dev_null_stderr, not verbose)
- self.assertEqual({'FOO': 'an env string'}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_start_msgq(self):
- '''Test that b10-msgq is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'FOO': 'an env string'}
- bob._run_under_unittests = True
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- # non-verbose case
- self._start_msgq_helper(bob, False)
-
- # verbose case
- self._start_msgq_helper(bob, True)
-
- def test_start_msgq_timeout(self):
- '''Test that b10-msgq startup attempts connections several times
- and times out eventually.'''
- bob = MockBobSimple()
- bob.c_channel_env = {}
- # set the timeout to an arbitrary pre-determined value (which
- # code below depends on)
- bob.msgq_timeout = 1
- bob._run_under_unittests = False
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- global tsec
- attempts = 0
- tsec = 0
- self._tmp_time = time.time
- self._tmp_sleep = time.sleep
- def _my_time():
- global attempts
- global tsec
- attempts += 1
- return tsec
- def _my_sleep(nsec):
- global tsec
- tsec += nsec
- time.time = _my_time
- time.sleep = _my_sleep
-
- global cc_sub
- cc_sub = None
- class DummySessionAlwaysFails():
- def __init__(self, socket_file):
- raise isc.cc.session.SessionError('Connection fails')
- def group_subscribe(self, s):
- global cc_sub
- cc_sub = s
-
- isc.cc.Session = DummySessionAlwaysFails
-
- with self.assertRaises(bind10_src.CChannelConnectError):
- # An exception will be thrown here when it eventually times
- # out.
- pi = bob.start_msgq()
-
- # time.time() should be called 12 times within the while loop:
- # starting from 0, and 11 more times from 0.1 to 1.1. There's
- # another call to time.time() outside the loop, which makes it
- # 13.
- self.assertEqual(attempts, 13)
-
- # group_subscribe() should not have been called here.
- self.assertIsNone(cc_sub)
-
- global cc_socket_file
- cc_socket_file = None
- cc_sub = None
- class DummySession():
- def __init__(self, socket_file):
- global cc_socket_file
- cc_socket_file = socket_file
- def group_subscribe(self, s):
- global cc_sub
- cc_sub = s
-
- isc.cc.Session = DummySession
-
- # reset values
- attempts = 0
- tsec = 0
-
- pi = bob.start_msgq()
-
- # just one attempt, but 2 calls to time.time()
- self.assertEqual(attempts, 2)
-
- self.assertEqual(cc_socket_file, bob.msgq_socket_file)
- self.assertEqual(cc_sub, 'Boss')
-
- # isc.cc.Session, time.time() and time.sleep() are restored
- # during tearDown().
-
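The fake time.time()/time.sleep() installed above are a general trick for exercising retry-until-timeout loops without real delays. Below is a standalone sketch of the same idea against a hypothetical connect_with_retries() helper (not start_msgq() itself); the fake clock simply advances whenever the code under test sleeps.

    import time
    import unittest

    def connect_with_retries(connect, timeout=1.0, interval=0.1):
        """Hypothetical helper: call connect() until it succeeds or the
        timeout elapses; re-raises the last error on timeout."""
        deadline = time.time() + timeout
        while True:
            try:
                return connect()
            except OSError:
                if time.time() >= deadline:
                    raise
                time.sleep(interval)

    class FakeClockTest(unittest.TestCase):
        def test_times_out_without_really_sleeping(self):
            clock = {'now': 0.0}
            orig_time, orig_sleep = time.time, time.sleep
            time.time = lambda: clock['now']
            # sleeping only advances the fake clock
            time.sleep = lambda nsec: clock.update(now=clock['now'] + nsec)
            try:
                def failing_connect():
                    raise OSError('connection refused')
                with self.assertRaises(OSError):
                    connect_with_retries(failing_connect)
            finally:
                time.time, time.sleep = orig_time, orig_sleep
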
- def _start_cfgmgr_helper(self, bob, data_path, filename, clear_config):
- expect_args = ['b10-cfgmgr']
- if data_path is not None:
- bob.data_path = data_path
- expect_args.append('--data-path=' + data_path)
- if filename is not None:
- bob.config_filename = filename
- expect_args.append('--config-filename=' + filename)
- if clear_config:
- bob.clear_config = clear_config
- expect_args.append('--clear-config')
-
- pi = bob.start_cfgmgr()
- self.assertEqual('b10-cfgmgr', pi.name)
- self.assertEqual(expect_args, pi.args)
- self.assertEqual({'TESTENV': 'A test string'}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_start_cfgmgr(self):
- '''Test that b10-cfgmgr is started.'''
- class DummySession():
- def __init__(self):
- self._tries = 0
- def group_recvmsg(self):
- self._tries += 1
- # return running on the 3rd try onwards
- if self._tries >= 3:
- return ({'running': 'ConfigManager'}, None)
- else:
- return ({}, None)
-
- bob = MockBobSimple()
- bob.c_channel_env = {'TESTENV': 'A test string'}
- bob.cc_session = DummySession()
- bob.wait_time = 5
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- attempts = 0
- self._tmp_sleep = time.sleep
- def _my_sleep(nsec):
- global attempts
- attempts += 1
- time.sleep = _my_sleep
-
- # defaults
- self._start_cfgmgr_helper(bob, None, None, False)
-
- # check that 2 attempts were made. On the 3rd attempt,
- # process_running() reports that ConfigManager is running.
- self.assertEqual(attempts, 2)
-
- # data_path is specified
- self._start_cfgmgr_helper(bob, '/var/lib/test', None, False)
-
- # config_filename is specified. Because `bob` is not
- # reconstructed, data_path is retained from the last call to
- # _start_cfgmgr_helper().
- self._start_cfgmgr_helper(bob, '/var/lib/test', 'foo.cfg', False)
-
- # clear_config is specified. Because `bob` is not reconstructed,
- # data_path and config_filename are retained from the last call
- # to _start_cfgmgr_helper().
- self._start_cfgmgr_helper(bob, '/var/lib/test', 'foo.cfg', True)
-
- def test_start_cfgmgr_timeout(self):
- '''Test that b10-cfgmgr startup attempts connections several times
- and times out eventually.'''
- class DummySession():
- def group_recvmsg(self):
- return (None, None)
- bob = MockBobSimple()
- bob.c_channel_env = {}
- bob.cc_session = DummySession()
- # set wait_time to an arbitrary pre-determined value (which code
- # below depends on)
- bob.wait_time = 2
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- global attempts
- attempts = 0
- self._tmp_sleep = time.sleep
- def _my_sleep(nsec):
- global attempts
- attempts += 1
- time.sleep = _my_sleep
-
- # We just check that an exception was thrown, and that several
- # attempts were made to connect.
- with self.assertRaises(bind10_src.ProcessStartError):
- pi = bob.start_cfgmgr()
-
- # 2 seconds of attempts every 1 second should result in 2 attempts
- self.assertEqual(attempts, 2)
-
- # time.sleep() is restored during tearDown().
-
- def test_start_ccsession(self):
- '''Test that CC session is started.'''
- class DummySession():
- def __init__(self, specfile, config_handler, command_handler,
- socket_file):
- self.specfile = specfile
- self.config_handler = config_handler
- self.command_handler = command_handler
- self.socket_file = socket_file
- self.started = False
- def start(self):
- self.started = True
- bob = MockBobSimple()
- self._tmp_module_cc_session = isc.config.ModuleCCSession
- isc.config.ModuleCCSession = DummySession
-
- bob.start_ccsession({})
- self.assertEqual(bind10_src.SPECFILE_LOCATION, bob.ccs.specfile)
- self.assertEqual(bob.config_handler, bob.ccs.config_handler)
- self.assertEqual(bob.command_handler, bob.ccs.command_handler)
- self.assertEqual(bob.msgq_socket_file, bob.ccs.socket_file)
- self.assertTrue(bob.ccs.started)
-
- # isc.config.ModuleCCSession is restored during tearDown().
-
- def test_start_process(self):
- '''Test that processes can be started.'''
- bob = MockBob()
-
- # use the MockProcessInfo creator
- bob._make_process_info = bob._make_mock_process_info
-
- pi = bob.start_process('Test Process', ['/bin/true'], {})
- self.assertEqual('Test Process', pi.name)
- self.assertEqual(['/bin/true'], pi.args)
- self.assertEqual({}, pi.env)
-
- # this is set by ProcessInfo.spawn()
- self.assertEqual(42147, pi.pid)
-
- def test_register_process(self):
- '''Test that processes can be registered with BoB.'''
- bob = MockBob()
- component = MockComponent('test', 53, 'Test')
-
- self.assertFalse(53 in bob.components)
- bob.register_process(53, component)
- self.assertTrue(53 in bob.components)
- self.assertEqual(bob.components[53].name(), 'test')
- self.assertEqual(bob.components[53].pid(), 53)
- self.assertEqual(bob.components[53].address(), 'Test')
-
- def _start_simple_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['/bin/true']
- if verbose:
- args.append('-v')
-
- bob.start_simple('/bin/true')
- self.assertEqual('/bin/true', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'TESTENV': 'A test string'}, bob.started_process_env)
-
- def test_start_simple(self):
- '''Test simple process startup.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'TESTENV': 'A test string'}
-
- # non-verbose case
- self._start_simple_helper(bob, False)
-
- # verbose case
- self._start_simple_helper(bob, True)
-
- def _start_auth_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['b10-auth']
- if verbose:
- args.append('-v')
-
- bob.start_auth()
- self.assertEqual('b10-auth', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'FOO': 'an env string'}, bob.started_process_env)
-
- def test_start_auth(self):
- '''Test that b10-auth is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'FOO': 'an env string'}
-
- # non-verbose case
- self._start_auth_helper(bob, False)
-
- # verbose case
- self._start_auth_helper(bob, True)
-
- def _start_resolver_helper(self, bob, verbose):
- bob.verbose = verbose
-
- args = ['b10-resolver']
- if verbose:
- args.append('-v')
-
- bob.start_resolver()
- self.assertEqual('b10-resolver', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'BAR': 'an env string'}, bob.started_process_env)
-
- def test_start_resolver(self):
- '''Test that b10-resolver is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'BAR': 'an env string'}
-
- # non-verbose case
- self._start_resolver_helper(bob, False)
-
- # verbose case
- self._start_resolver_helper(bob, True)
-
- def _start_cmdctl_helper(self, bob, verbose, port = None):
- bob.verbose = verbose
-
- args = ['b10-cmdctl']
-
- if port is not None:
- bob.cmdctl_port = port
- args.append('--port=9353')
-
- if verbose:
- args.append('-v')
-
- bob.start_cmdctl()
- self.assertEqual('b10-cmdctl', bob.started_process_name)
- self.assertEqual(args, bob.started_process_args)
- self.assertEqual({'BAZ': 'an env string'}, bob.started_process_env)
-
- def test_start_cmdctl(self):
- '''Test that b10-cmdctl is started.'''
- bob = MockBobSimple()
- bob.c_channel_env = {'BAZ': 'an env string'}
-
- # non-verbose case
- self._start_cmdctl_helper(bob, False)
-
- # verbose case
- self._start_cmdctl_helper(bob, True)
-
- # with port, non-verbose case
- self._start_cmdctl_helper(bob, False, 9353)
-
- # with port, verbose case
- self._start_cmdctl_helper(bob, True, 9353)
-
- def test_socket_data(self):
- '''Test that BoB._socket_data works as expected.'''
- class MockSock:
- def __init__(self, fd, throw):
- self.fd = fd
- self.throw = throw
- self.buf = b'Hello World.\nYou are so nice today.\nXX'
- self.i = 0
-
- def recv(self, bufsize, flags = 0):
- if bufsize != 1:
- raise Exception('bufsize != 1')
- if flags != socket.MSG_DONTWAIT:
- raise Exception('flags != socket.MSG_DONTWAIT')
- # after 15 recv()s, throw a socket.error with EAGAIN to
- # get _socket_data() to save back what's been read. The
- # number 15 is arbitrarily chosen, but the checks then
- # depend on this being 15, i.e., if you adjust this
- # number, you may have to adjust the checks below too.
- if self.throw and self.i > 15:
- raise socket.error(errno.EAGAIN, 'Try again')
- if self.i >= len(self.buf):
- return b''
- t = self.i
- self.i += 1
- return self.buf[t:t+1]
-
- def close(self):
- return
-
- class MockBobSocketData(BoB):
- def __init__(self, throw):
- self._unix_sockets = {42: (MockSock(42, throw), b'')}
- self.requests = []
- self.dead = []
-
- def socket_request_handler(self, previous, sock):
- self.requests.append({sock.fd: previous})
-
- def socket_consumer_dead(self, sock):
- self.dead.append(sock.fd)
-
- # Case where we get data every time we call recv()
- bob = MockBobSocketData(False)
- bob._socket_data(42)
- self.assertEqual(bob.requests,
- [{42: b'Hello World.'},
- {42: b'You are so nice today.'}])
- self.assertEqual(bob.dead, [42])
- self.assertEqual({}, bob._unix_sockets)
-
- # Case where socket.recv() raises EAGAIN. In this case, the
- # routine is supposed to save what it has back to
- # BoB._unix_sockets.
- bob = MockBobSocketData(True)
- bob._socket_data(42)
- self.assertEqual(bob.requests, [{42: b'Hello World.'}])
- self.assertFalse(bob.dead)
- self.assertEqual(len(bob._unix_sockets), 1)
- self.assertEqual(bob._unix_sockets[42][1], b'You')
-
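The behaviour asserted in both cases above -- newline-terminated tokens assembled from non-blocking one-byte reads, an EAGAIN leaving the partial token saved for later, and an empty read meaning the consumer is gone -- can be written down independently of BoB. The helper below is an illustrative re-statement of that contract, not the actual _socket_data() code:

    import errno
    import socket

    def read_tokens(sock, leftover=b''):
        """Illustrative sketch: drain 'sock' one byte at a time without
        blocking and return (tokens, leftover, closed) -- the complete
        newline-terminated tokens, any unfinished token to keep for the
        next round, and whether the peer closed the connection."""
        buf = leftover
        closed = False
        while True:
            try:
                data = sock.recv(1, socket.MSG_DONTWAIT)
            except socket.error as err:
                if err.errno == errno.EAGAIN:
                    break         # nothing more right now; keep leftover
                raise             # broken socket; caller should drop it
            if data == b'':
                closed = True     # peer closed the connection
                break
            buf += data
        parts = buf.split(b'\n')
        return parts[:-1], parts[-1], closed
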
- def test_startup(self):
- '''Test that BoB.startup() handles failures properly.'''
- class MockBobStartup(BoB):
- def __init__(self, throw):
- self.throw = throw
- self.started = False
- self.killed = False
- self.msgq_socket_file = None
- self.curproc = 'myproc'
- self.runnable = False
-
- def start_all_components(self):
- self.started = True
- if self.throw is True:
- raise Exception('Assume starting components has failed.')
- elif self.throw:
- raise self.throw
-
- def kill_started_components(self):
- self.killed = True
-
- class DummySession():
- def __init__(self, socket_file):
- raise isc.cc.session.SessionError('This is the expected case.')
-
- class DummySessionSocketExists():
- def __init__(self, socket_file):
- # simulate that connect passes
- return
-
- isc.cc.Session = DummySession
-
- # All is well case, where all components are started
- # successfully. We check that the actual call to
- # start_all_components() is made, and BoB.runnable is true.
- bob = MockBobStartup(False)
- r = bob.startup()
- self.assertIsNone(r)
- self.assertTrue(bob.started)
- self.assertFalse(bob.killed)
- self.assertTrue(bob.runnable)
- self.assertEqual({}, bob.c_channel_env)
-
- # Case where starting components fails. We check that
- # kill_started_components() is called right after, and
- # BoB.runnable is not modified.
- bob = MockBobStartup(True)
- r = bob.startup()
- # r contains an error message
- self.assertEqual(r, 'Unable to start myproc: Assume starting components has failed.')
- self.assertTrue(bob.started)
- self.assertTrue(bob.killed)
- self.assertFalse(bob.runnable)
- self.assertEqual({}, bob.c_channel_env)
-
- # Check if msgq_socket_file is carried over
- bob = MockBobStartup(False)
- bob.msgq_socket_file = 'foo'
- r = bob.startup()
- self.assertEqual({'BIND10_MSGQ_SOCKET_FILE': 'foo'}, bob.c_channel_env)
-
- # Check failure of changing user results in a different message
- bob = MockBobStartup(bind10_src.ChangeUserError('failed to chusr'))
- r = bob.startup()
- self.assertIn('failed to chusr', r)
- self.assertTrue(bob.killed)
-
- # Check the case when socket file already exists
- isc.cc.Session = DummySessionSocketExists
- bob = MockBobStartup(False)
- r = bob.startup()
- self.assertIn('already running', r)
-
- # isc.cc.Session is restored during tearDown().
-
-class SocketSrvTest(unittest.TestCase):
- """
- This tests some methods of boss related to the unix domain sockets used
- to transfer other sockets to applications.
- """
- def setUp(self):
- """
- Create the boss to test, test data, and back up some functions.
- """
- self.__boss = BoB()
- self.__select_backup = bind10_src.select.select
- self.__select_called = None
- self.__socket_data_called = None
- self.__consumer_dead_called = None
- self.__socket_request_handler_called = None
-
- def tearDown(self):
- """
- Restore functions.
- """
- bind10_src.select.select = self.__select_backup
-
- class __FalseSocket:
- """
- A mock socket used for select, accept and similar calls.
- """
- def __init__(self, owner, fileno=42):
- self.__owner = owner
- self.__fileno = fileno
- self.data = None
- self.closed = False
-
- def fileno(self):
- return self.__fileno
-
- def accept(self):
- return (self.__class__(self.__owner, 13), "/path/to/socket")
-
- def recv(self, bufsize, flags=0):
- self.__owner.assertEqual(1, bufsize)
- self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
- if isinstance(self.data, socket.error):
- raise self.data
- elif self.data is not None:
- if len(self.data):
- result = self.data[0:1]
- self.data = self.data[1:]
- return result
- else:
- raise socket.error(errno.EAGAIN, "Would block")
- else:
- return b''
-
- def close(self):
- self.closed = True
-
- class __CCS:
- """
- A mock CCS, just to provide the socket file number.
- """
- class __Socket:
- def fileno(self):
- return 1
- def get_socket(self):
- return self.__Socket()
-
- def __select_accept(self, r, w, x, t):
- self.__select_called = (r, w, x, t)
- return ([42], [], [])
-
- def __select_data(self, r, w, x, t):
- self.__select_called = (r, w, x, t)
- return ([13], [], [])
-
- def __accept(self):
- """
- Hijack the accept method of the boss.
-
- Notes down that it was called and stops the boss.
- """
- self.__accept_called = True
- self.__boss.runnable = False
-
- def test_srv_accept_called(self):
- """
- Test that the _srv_accept method of boss is called when the listening
- socket is readable.
- """
- self.__boss.runnable = True
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._srv_accept = self.__accept
- self.__boss.ccs = self.__CCS()
- bind10_src.select.select = self.__select_accept
- self.__boss.run(2)
- # It called the accept
- self.assertTrue(self.__accept_called)
- # And the select had the right parameters
- self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
-
- def test_srv_accept(self):
- """
- Test how the _srv_accept method works.
- """
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._srv_accept()
- # After we accepted, a new socket is added there
- socket = self.__boss._unix_sockets[13][0]
- # The socket is properly stored there
- self.assertTrue(isinstance(socket, self.__FalseSocket))
- # And the buffer (yet empty) is there
- self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
-
- def __socket_data(self, socket):
- self.__boss.runnable = False
- self.__socket_data_called = socket
-
- def test_socket_data(self):
- """
- Test that a socket that wants attention gets it.
- """
- self.__boss._srv_socket = self.__FalseSocket(self)
- self.__boss._socket_data = self.__socket_data
- self.__boss.ccs = self.__CCS()
- self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
- self.__boss.runnable = True
- bind10_src.select.select = self.__select_data
- self.__boss.run(2)
- self.assertEqual(13, self.__socket_data_called)
- self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
-
- def __prepare_data(self, data):
- socket = self.__FalseSocket(self, 13)
- self.__boss._unix_sockets = {13: (socket, b'')}
- socket.data = data
- self.__boss.socket_consumer_dead = self.__consumer_dead
- self.__boss.socket_request_handler = self.__socket_request_handler
- return socket
-
- def __consumer_dead(self, socket):
- self.__consumer_dead_called = socket
-
- def __socket_request_handler(self, token, socket):
- self.__socket_request_handler_called = (token, socket)
-
- def test_socket_closed(self):
- """
- Test that a socket is removed and the socket_consumer_dead is called
- when it is closed.
- """
- socket = self.__prepare_data(None)
- self.__boss._socket_data(13)
- self.assertEqual(socket, self.__consumer_dead_called)
- self.assertEqual({}, self.__boss._unix_sockets)
- self.assertTrue(socket.closed)
-
- def test_socket_short(self):
- """
- Test that if there's not enough data to read a whole token, the
- partial data is kept there, but nothing is called.
- """
- socket = self.__prepare_data(b'tok')
- self.__boss._socket_data(13)
- self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
- self.assertFalse(socket.closed)
- self.assertIsNone(self.__consumer_dead_called)
- self.assertIsNone(self.__socket_request_handler_called)
-
- def test_socket_continue(self):
- """
- Test that we call the token handling function when the whole token
- comes. This test pretends to continue reading where the previous one
- stopped.
- """
- socket = self.__prepare_data(b"en\nanothe")
- # The data to finish
- self.__boss._unix_sockets[13] = (socket, b'tok')
- self.__boss._socket_data(13)
- self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
- self.assertFalse(socket.closed)
- self.assertIsNone(self.__consumer_dead_called)
- self.assertEqual((b'token', socket),
- self.__socket_request_handler_called)
-
- def test_broken_socket(self):
- """
- If the socket raises an exception during the read other than EAGAIN,
- it is broken and we remove it.
- """
- sock = self.__prepare_data(socket.error(errno.ENOMEM,
- "There's more memory available, but not for you"))
- self.__boss._socket_data(13)
- self.assertEqual(sock, self.__consumer_dead_called)
- self.assertEqual({}, self.__boss._unix_sockets)
- self.assertTrue(sock.closed)
-
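The two tests driving run() above pin down which descriptors the main loop watches: the wakeup descriptor passed to run(), the config session socket, the socket-transfer server socket and every accepted unix socket. A single iteration of such a loop might look like the sketch below; it is an illustration of the expected behaviour, not the boss's code.

    import select

    def run_once(wakeup_fd, ccs_fd, srv_fd, unix_fds, on_accept, on_data):
        """Illustrative sketch of one iteration of the select loop the
        tests above exercise."""
        rlist = [wakeup_fd, ccs_fd, srv_fd] + list(unix_fds)
        readable, _, _ = select.select(rlist, [], [])
        for fd in readable:
            if fd == srv_fd:
                on_accept()       # a new consumer is connecting
            elif fd in unix_fds:
                on_data(fd)       # an accepted consumer sent data
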
-class TestFunctions(unittest.TestCase):
- def setUp(self):
- self.lockfile_testpath = \
- "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test"
- self.assertFalse(os.path.exists(self.lockfile_testpath))
- os.mkdir(self.lockfile_testpath)
- self.assertTrue(os.path.isdir(self.lockfile_testpath))
- self.__isfile_orig = bind10_src.os.path.isfile
- self.__unlink_orig = bind10_src.os.unlink
-
- def tearDown(self):
- os.rmdir(self.lockfile_testpath)
- self.assertFalse(os.path.isdir(self.lockfile_testpath))
- os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@"
- bind10_src.os.path.isfile = self.__isfile_orig
- bind10_src.os.unlink = self.__unlink_orig
-
- def test_remove_lock_files(self):
- os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath
-
- # create lockfiles for the testcase
- lockfiles = ["logger_lockfile"]
- for f in lockfiles:
- fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
- self.assertFalse(os.path.exists(fname))
- open(fname, "w").close()
- self.assertTrue(os.path.isfile(fname))
-
- # first call should clear up all the lockfiles
- bind10_src.remove_lock_files()
-
- # check if the lockfiles exist
- for f in lockfiles:
- fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
- self.assertFalse(os.path.isfile(fname))
-
- # a second call should not fail either
- bind10_src.remove_lock_files()
-
- def test_remove_lock_files_fail(self):
- # Permission error on unlink is ignored; other exceptions are really
- # unexpected and propagated.
- def __raising_unlink(unused, ex):
- raise ex
-
- bind10_src.os.path.isfile = lambda _: True
- os_error = OSError()
- bind10_src.os.unlink = lambda f: __raising_unlink(f, os_error)
-
- os_error.errno = errno.EPERM
- bind10_src.remove_lock_files() # no disruption
-
- os_error.errno = errno.EACCES
- bind10_src.remove_lock_files() # no disruption
-
- os_error.errno = errno.ENOENT
- self.assertRaises(OSError, bind10_src.remove_lock_files)
-
- bind10_src.os.unlink = lambda f: __raising_unlink(f, Exception('bad'))
- self.assertRaises(Exception, bind10_src.remove_lock_files)
-
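The two tests above together spell out the intended contract of remove_lock_files(): missing files are simply skipped, permission errors (EPERM/EACCES) on unlink are tolerated, and anything else propagates. A minimal sketch of a helper with that contract, using a hypothetical lock-file list:

    import errno
    import os

    def remove_lock_files(lockdir, names=('logger_lockfile',)):
        """Illustrative sketch: best-effort removal of lock files,
        ignoring only permission errors on unlink."""
        for name in names:
            path = os.path.join(lockdir, name)
            if not os.path.isfile(path):
                continue          # nothing to clean up
            try:
                os.unlink(path)
            except OSError as ex:
                if ex.errno in (errno.EPERM, errno.EACCES):
                    continue      # cannot remove it, but don't fail startup
                raise             # anything else is unexpected
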
- def test_get_signame(self):
- # just test with some samples
- signame = bind10_src.get_signame(signal.SIGTERM)
- self.assertEqual('SIGTERM', signame)
- signame = bind10_src.get_signame(signal.SIGKILL)
- self.assertEqual('SIGKILL', signame)
- # 59426 is hopefully an unused signal on most platforms
- signame = bind10_src.get_signame(59426)
- self.assertEqual('Unknown signal 59426', signame)
-
- def test_fatal_signal(self):
- self.assertIsNone(bind10_src.boss_of_bind)
- bind10_src.boss_of_bind = BoB()
- bind10_src.boss_of_bind.runnable = True
- bind10_src.fatal_signal(signal.SIGTERM, None)
- # Now, runnable must be False
- self.assertFalse(bind10_src.boss_of_bind.runnable)
- bind10_src.boss_of_bind = None
-
-if __name__ == '__main__':
- # store os.environ for test_unchanged_environment
- original_os_environ = copy.deepcopy(os.environ)
- isc.log.resetUnitTestRootLogger()
- unittest.main()
diff --git a/src/bin/bind10/tests/init_test.py.in b/src/bin/bind10/tests/init_test.py.in
new file mode 100644
index 0000000..9a591ef
--- /dev/null
+++ b/src/bin/bind10/tests/init_test.py.in
@@ -0,0 +1,2426 @@
+# Copyright (C) 2011 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# Most of the time, we omit the "init" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the b10-init.
+from init import Init, ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import init
+
+# XXX: environment tests are currently disabled, due to the preprocessor
+# setup that we have now complicating the environment
+
+import unittest
+import sys
+import os
+import os.path
+import copy
+import signal
+import socket
+from isc.net.addr import IPAddr
+import time
+import isc.log
+import isc.config
+import isc.bind10.socket_cache
+import errno
+import random
+
+from isc.testutils.parse_args import TestOptParser, OptsError
+from isc.testutils.ccsession_mock import MockModuleCCSession
+
+class TestProcessInfo(unittest.TestCase):
+ def setUp(self):
+ # redirect stdout to a pipe so we can check that our
+ # process spawning is doing the right thing with stdout
+ self.old_stdout = os.dup(sys.stdout.fileno())
+ self.pipes = os.pipe()
+ os.dup2(self.pipes[1], sys.stdout.fileno())
+ os.close(self.pipes[1])
+ # note that we use dup2() to restore the original stdout
+ # to the main program ASAP in each test... this prevents
+ # hangs reading from the child process (as the pipe is only
+ # open in the child), and also ensures nice pretty output
+
+ def tearDown(self):
+ # clean up our stdout munging
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ os.close(self.pipes[0])
+
+ def test_init(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ pi.spawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+
+# def test_setting_env(self):
+# pi = ProcessInfo('Test Process', [ '/bin/true' ], env={'FOO': 'BAR'})
+# os.dup2(self.old_stdout, sys.stdout.fileno())
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'],
+# 'FOO': 'BAR' })
+
+ def test_setting_null_stdout(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ],
+ dev_null_stdout=True)
+ pi.spawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ self.assertEqual(pi.dev_null_stdout, True)
+ self.assertEqual(os.read(self.pipes[0], 100), b"")
+
+ def test_respawn(self):
+ pi = ProcessInfo('Test Process', [ '/bin/echo', 'foo' ])
+ pi.spawn()
+ # wait for old process to work...
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ # respawn it
+ old_pid = pi.pid
+ pi.respawn()
+ os.dup2(self.old_stdout, sys.stdout.fileno())
+ # make sure the new one started properly
+ self.assertEqual(pi.name, 'Test Process')
+ self.assertEqual(pi.args, [ '/bin/echo', 'foo' ])
+# self.assertEqual(pi.env, { 'PATH': os.environ['PATH'],
+# 'PYTHON_EXEC': os.environ['PYTHON_EXEC'] })
+ self.assertEqual(pi.dev_null_stdout, False)
+ self.assertEqual(os.read(self.pipes[0], 100), b"foo\n")
+ self.assertNotEqual(pi.process, None)
+ self.assertTrue(type(pi.pid) is int)
+ self.assertNotEqual(pi.pid, old_pid)
+
+class TestCacheCommands(unittest.TestCase):
+ """
+ Test methods of b10-init related to the socket cache and socket handling.
+ """
+ def setUp(self):
+ """
+ Prepare b10-init for some tests.
+
+ Also prepare some variables we need.
+ """
+ self.__b10_init = Init()
+ # Fake the cache here: this test object pretends to be the cache
+ # so we can hijack the calls to its methods.
+ self.__b10_init._socket_cache = self
+ self.__b10_init._socket_path = '/socket/path'
+ self.__raise_exception = None
+ self.__socket_args = {
+ "port": 53,
+ "address": "::",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # What was and wasn't called.
+ self.__drop_app_called = None
+ self.__get_socket_called = None
+ self.__send_fd_called = None
+ self.__get_token_called = None
+ self.__drop_socket_called = None
+ init.libutil_io_python.send_fd = self.__send_fd
+
+ def __send_fd(self, to, socket):
+ """
+ A function to hook the send_fd in the b10-init.
+ """
+ self.__send_fd_called = (to, socket)
+
+ class FalseSocket:
+ """
+ A socket where we can fake methods we need instead of having a real
+ socket.
+ """
+ def __init__(self):
+ self.send = b""
+ def fileno(self):
+ """
+ The file number. Used for identifying the remote application.
+ """
+ return 42
+
+ def sendall(self, data):
+ """
+ Adds data to the self.send.
+ """
+ self.send += data
+
+ def drop_application(self, application):
+ """
+ Part of pretending to be the cache. Logs the parameter to
+ self.__drop_app_called.
+
+ In the case self.__raise_exception is set, the exception there
+ is raised instead.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_app_called = application
+
+ def test_consumer_dead(self):
+ """
+ Test that it calls the drop_application method of the cache.
+ """
+ self.__b10_init.socket_consumer_dead(self.FalseSocket())
+ self.assertEqual(42, self.__drop_app_called)
+
+ def test_consumer_dead_invalid(self):
+ """
+ Test that b10_init doesn't crash in case the application is not
+ known to the cache, as this actually can happen in practice.
+ """
+ self.__raise_exception = ValueError("This application is unknown")
+ # This doesn't crash
+ self.__b10_init.socket_consumer_dead(self.FalseSocket())
+
+ def get_socket(self, token, application):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the call is logged
+ into __get_socket_called and a number is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_socket_called = (token, application)
+ return 13
+
+ def test_request_handler(self):
+ """
+ Test that a request for a socket is forwarded to the cache and the
+ socket is sent back, if the cache returns one.
+ """
+ socket = self.FalseSocket()
+ # An exception from the cache
+ self.__raise_exception = ValueError("Test value error")
+ self.__b10_init.socket_request_handler(b"token", socket)
+ # It was called, but it threw, so it is not noted here
+ self.assertIsNone(self.__get_socket_called)
+ self.assertEqual(b"0\n", socket.send)
+ # It should not have sent any socket.
+ self.assertIsNone(self.__send_fd_called)
+ # Now prepare a valid scenario
+ self.__raise_exception = None
+ socket.send = b""
+ self.__b10_init.socket_request_handler(b"token", socket)
+ self.assertEqual(b"1\n", socket.send)
+ self.assertEqual((42, 13), self.__send_fd_called)
+ self.assertEqual(("token", 42), self.__get_socket_called)
+
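The assertions above describe a small reply protocol: on failure the handler answers b'0\n' and sends no descriptor; on success it answers b'1\n' and then passes the descriptor via send_fd(), keyed by the client's fileno(). A hedged sketch of a handler with exactly that behaviour (the cache and send_fd arguments mirror the mocks in this test, not the real objects):

    def socket_request_handler(cache, send_fd, token, sock):
        """Illustrative sketch: look the token up in the cache and either
        transfer the resulting descriptor or report failure."""
        try:
            fd = cache.get_socket(token.decode(), sock.fileno())
        except Exception:
            sock.sendall(b"0\n")  # failure: no descriptor follows
            return
        sock.sendall(b"1\n")      # success: descriptor follows
        send_fd(sock.fileno(), fd)
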
+ def get_token(self, protocol, address, port, share_mode, share_name):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameters are
+ logged into __get_token_called and a token is returned.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__get_token_called = (protocol, address, port, share_mode,
+ share_name)
+ return "token"
+
+ def test_get_socket_ok(self):
+ """
+ Test the successful scenario of getting a socket.
+ """
+ result = self.__b10_init._get_socket(self.__socket_args)
+ [code, answer] = result['result']
+ self.assertEqual(0, code)
+ self.assertEqual({
+ 'token': 'token',
+ 'path': '/socket/path'
+ }, answer)
+ addr = self.__get_token_called[1]
+ self.assertTrue(isinstance(addr, IPAddr))
+ self.assertEqual("::", str(addr))
+ self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+ self.__get_token_called)
+
+ def test_get_socket_error(self):
+ """
+ Test that bad inputs are handled correctly, etc.
+ """
+ def check_code(code, args):
+ """
+ Pass the args there and check if it returns success or not.
+
+ The rest is not tested, as it is already checked in the
+ test_get_socket_ok.
+ """
+ [rcode, ranswer] = self.__b10_init._get_socket(args)['result']
+ self.assertEqual(code, rcode)
+ if code != 0:
+ # This should be an error message. The exact formatting
+ # is unknown, but we at least check that it is a string
+ self.assertTrue(isinstance(ranswer, str))
+
+ def mod_args(name, value):
+ """
+ Override a parameter in the args.
+ """
+ result = dict(self.__socket_args)
+ result[name] = value
+ return result
+
+ # Port too large
+ check_code(1, mod_args('port', 65536))
+ # Not numeric address
+ check_code(1, mod_args('address', 'example.org.'))
+ # Some bad values of enum-like params
+ check_code(1, mod_args('protocol', 'BAD PROTO'))
+ check_code(1, mod_args('share_mode', 'BAD SHARE'))
+ # Check missing parameters
+ for param in self.__socket_args.keys():
+ args = dict(self.__socket_args)
+ del args[param]
+ check_code(1, args)
+ # These are OK values for the enum-like parameters
+ # The ones from test_get_socket_ok are not tested here
+ check_code(0, mod_args('protocol', 'TCP'))
+ check_code(0, mod_args('share_mode', 'SAMEAPP'))
+ check_code(0, mod_args('share_mode', 'NO'))
+ # If an exception is raised from within the cache, it is converted
+ # to an error, not propagated
+ self.__raise_exception = Exception("Test exception")
+ check_code(1, self.__socket_args)
+ # The special "expected" exceptions
+ self.__raise_exception = \
+ isc.bind10.socket_cache.ShareError("Not shared")
+ check_code(3, self.__socket_args)
+ self.__raise_exception = \
+ isc.bind10.socket_cache.SocketError("Not shared", 13)
+ check_code(2, self.__socket_args)
+
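The result codes exercised by check_code() above suggest a straightforward mapping from cache exceptions to answers: 0 for success, 3 for a sharing conflict, 2 for a socket creation failure and 1 for bad input or any other error. The sketch below uses local stand-in classes for isc.bind10.socket_cache.ShareError and SocketError; it illustrates the mapping, not the real _get_socket():

    class ShareError(Exception):
        """Stand-in for isc.bind10.socket_cache.ShareError."""

    class SocketError(Exception):
        """Stand-in for isc.bind10.socket_cache.SocketError."""

    def get_socket_answer(cache, args, path='/socket/path'):
        """Illustrative sketch: translate cache exceptions into the
        numeric result codes the test above expects."""
        try:
            token = cache.get_token(args['protocol'], args['address'],
                                    args['port'], args['share_mode'],
                                    args['share_name'])
            return {'result': [0, {'token': token, 'path': path}]}
        except ShareError as ex:
            return {'result': [3, str(ex)]}
        except SocketError as ex:
            return {'result': [2, str(ex)]}
        except Exception as ex:
            # bad input (e.g. a missing parameter) ends up here as well
            return {'result': [1, str(ex)]}
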
+ def drop_socket(self, token):
+ """
+ Part of pretending to be the cache. If there's anything in
+ __raise_exception, it is raised. Otherwise, the parameter is stored
+ in __drop_socket_called.
+ """
+ if self.__raise_exception is not None:
+ raise self.__raise_exception
+ self.__drop_socket_called = token
+
+ def test_drop_socket(self):
+ """
+ Check the drop_socket command. It should directly call the method
+ on the cache. Exceptions should be translated to error messages.
+ """
+ # This should be OK and just propagated to the call.
+ self.assertEqual({"result": [0]},
+ self.__b10_init.command_handler("drop_socket",
+ {"token": "token"}))
+ self.assertEqual("token", self.__drop_socket_called)
+ self.__drop_socket_called = None
+ # Missing parameter
+ self.assertEqual({"result": [1, "Missing token parameter"]},
+ self.__b10_init.command_handler("drop_socket", {}))
+ self.assertIsNone(self.__drop_socket_called)
+ # An exception is raised from within the cache
+ self.__raise_exception = ValueError("Test error")
+ self.assertEqual({"result": [1, "Test error"]},
+ self.__b10_init.command_handler("drop_socket",
+ {"token": "token"}))
+
+
+class TestInit(unittest.TestCase):
+ def setUp(self):
+ # Save original values that may be tweaked in some tests
+ self.__orig_setgid = init.posix.setgid
+ self.__orig_setuid = init.posix.setuid
+ self.__orig_logger_class = isc.log.Logger
+
+ def tearDown(self):
+ # Restore original values saved in setUp()
+ init.posix.setgid = self.__orig_setgid
+ init.posix.setuid = self.__orig_setuid
+ isc.log.Logger = self.__orig_logger_class
+
+ def test_init(self):
+ b10_init = Init()
+ self.assertEqual(b10_init.verbose, False)
+ self.assertEqual(b10_init.msgq_socket_file, None)
+ self.assertEqual(b10_init.cc_session, None)
+ self.assertEqual(b10_init.ccs, None)
+ self.assertEqual(b10_init.components, {})
+ self.assertEqual(b10_init.runnable, False)
+ self.assertEqual(b10_init.username, None)
+ self.assertIsNone(b10_init._socket_cache)
+
+ def __setgid(self, gid):
+ self.__gid_set = gid
+
+ def __setuid(self, uid):
+ self.__uid_set = uid
+
+ def test_change_user(self):
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = self.__setuid
+
+ self.__gid_set = None
+ self.__uid_set = None
+ b10_init = Init()
+ b10_init.change_user()
+ # No gid/uid set in init, nothing called.
+ self.assertIsNone(self.__gid_set)
+ self.assertIsNone(self.__uid_set)
+
+ Init(setuid=42, setgid=4200).change_user()
+ # This time, it gets called
+ self.assertEqual(4200, self.__gid_set)
+ self.assertEqual(42, self.__uid_set)
+
+ def raising_set_xid(gid_or_uid):
+ ex = OSError()
+ ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
+ raise ex
+
+ # Let setgid raise an exception
+ init.posix.setgid = raising_set_xid
+ init.posix.setuid = self.__setuid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
+ # Let setuid raise an exception
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = raising_set_xid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
+ # Let initial log output after setuid raise an exception
+ init.posix.setgid = self.__setgid
+ init.posix.setuid = self.__setuid
+ isc.log.Logger = raising_set_xid
+ self.assertRaises(init.ChangeUserError,
+ Init(setuid=42, setgid=4200).change_user)
+
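The expectations above (setgid() called with the group first, setuid() with the user second, and any failure surfacing as ChangeUserError) follow the usual privilege-dropping rule: the group must be changed while the process still can, i.e. before giving up the user id. A small sketch under those assumptions, with a local stand-in for init.ChangeUserError:

    import os

    class ChangeUserError(Exception):
        """Stand-in for init.ChangeUserError in this sketch."""

    def change_user(uid=None, gid=None):
        """Illustrative sketch: drop group privileges first, then user
        privileges, converting OS errors into one exception type."""
        try:
            if gid is not None:
                os.setgid(gid)    # must happen before we drop the user id
            if uid is not None:
                os.setuid(uid)
        except OSError as ex:
            raise ChangeUserError('unable to change user: %s' % ex.strerror)
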
+ def test_set_creator(self):
+ """
+ Test the call to set_creator. The first time, the cache is created
+ with the passed creator. The next time, it raises an exception.
+ """
+ init = Init()
+ # The cache doesn't use it at start, so just create an empty class
+ class Creator: pass
+ creator = Creator()
+ init.set_creator(creator)
+ self.assertTrue(isinstance(init._socket_cache,
+ isc.bind10.socket_cache.Cache))
+ self.assertEqual(creator, init._socket_cache._creator)
+ self.assertRaises(ValueError, init.set_creator, creator)
+
+ def test_socket_srv(self):
+ """Tests init_socket_srv() and remove_socket_srv() work as expected."""
+ init = Init()
+
+ self.assertIsNone(init._srv_socket)
+ self.assertIsNone(init._tmpdir)
+ self.assertIsNone(init._socket_path)
+
+ init.init_socket_srv()
+
+ self.assertIsNotNone(init._srv_socket)
+ self.assertNotEqual(-1, init._srv_socket.fileno())
+ self.assertEqual(os.path.join(init._tmpdir, 'sockcreator'),
+ init._srv_socket.getsockname())
+
+ self.assertIsNotNone(init._tmpdir)
+ self.assertTrue(os.path.isdir(init._tmpdir))
+ self.assertIsNotNone(init._socket_path)
+ self.assertTrue(os.path.exists(init._socket_path))
+
+ # Check that it's possible to connect to the socket file (this
+ # only works if the socket file exists and the server listens on
+ # it).
+ s = socket.socket(socket.AF_UNIX)
+ try:
+ s.connect(init._socket_path)
+ can_connect = True
+ s.close()
+ except socket.error as e:
+ can_connect = False
+
+ self.assertTrue(can_connect)
+
+ init.remove_socket_srv()
+
+ self.assertEqual(-1, init._srv_socket.fileno())
+ self.assertFalse(os.path.exists(init._socket_path))
+ self.assertFalse(os.path.isdir(init._tmpdir))
+
+ # These should not fail either:
+
+ # second call
+ init.remove_socket_srv()
+
+ init._srv_socket = None
+ init.remove_socket_srv()
+
+ def test_init_alternate_socket(self):
+ init = Init("alt_socket_file")
+ self.assertEqual(init.verbose, False)
+ self.assertEqual(init.msgq_socket_file, "alt_socket_file")
+ self.assertEqual(init.cc_session, None)
+ self.assertEqual(init.ccs, None)
+ self.assertEqual(init.components, {})
+ self.assertEqual(init.runnable, False)
+ self.assertEqual(init.username, None)
+
+ def test_command_handler(self):
+ class DummySession():
+ def group_sendmsg(self, msg, group):
+ (self.msg, self.group) = (msg, group)
+ def group_recvmsg(self, nonblock, seq): pass
+ class DummyModuleCCSession():
+ module_spec = isc.config.module_spec.ModuleSpec({
+ "module_name": "Init",
+ "statistics": [
+ {
+ "item_name": "boot_time",
+ "item_type": "string",
+ "item_optional": False,
+ "item_default": "1970-01-01T00:00:00Z",
+ "item_title": "Boot time",
+ "item_description": "A date time when bind10 process starts initially",
+ "item_format": "date-time"
+ }
+ ]
+ })
+ def get_module_spec(self):
+ return self.module_spec
+ init = Init()
+ init.verbose = True
+ init.cc_session = DummySession()
+ init.ccs = DummyModuleCCSession()
+ # a bad command
+ self.assertEqual(init.command_handler(-1, None),
+ isc.config.ccsession.create_answer(1, "bad command"))
+ # "shutdown" command
+ self.assertEqual(init.command_handler("shutdown", None),
+ isc.config.ccsession.create_answer(0))
+ self.assertFalse(init.runnable)
+ # "getstats" command
+ self.assertEqual(init.command_handler("getstats", None),
+ isc.config.ccsession.create_answer(0,
+ { 'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME) }))
+ # "ping" command
+ self.assertEqual(init.command_handler("ping", None),
+ isc.config.ccsession.create_answer(0, "pong"))
+ # "show_processes" command
+ self.assertEqual(init.command_handler("show_processes", None),
+ isc.config.ccsession.create_answer(0,
+ init.get_processes()))
+ # an unknown command
+ self.assertEqual(init.command_handler("__UNKNOWN__", None),
+ isc.config.ccsession.create_answer(1, "Unknown command"))
+
+ # Fake the get_token of cache and test the command works
+ init._socket_path = '/socket/path'
+ class cache:
+ def get_token(self, protocol, addr, port, share_mode, share_name):
+ return str(addr) + ':' + str(port)
+ init._socket_cache = cache()
+ args = {
+ "port": 53,
+ "address": "0.0.0.0",
+ "protocol": "UDP",
+ "share_mode": "ANY",
+ "share_name": "app"
+ }
+ # at all and this is the easiest way to check.
+ self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+ 'path': '/socket/path'}]},
+ init.command_handler("get_socket", args))
+ # The drop_socket is not tested here, but in TestCacheCommands.
+ # It needs the cache mocks to be in place and they are there.
+
+ def test_stop_process(self):
+ """
+ Test that the stop_process method sends the right message over
+ the message bus.
+ """
+ class DummySession():
+ def group_sendmsg(self, msg, group, instance="*"):
+ (self.msg, self.group, self.instance) = (msg, group, instance)
+ init = Init()
+ init.cc_session = DummySession()
+ init.stop_process('process', 'address', 42)
+ self.assertEqual('address', init.cc_session.group)
+ self.assertEqual('address', init.cc_session.instance)
+ self.assertEqual({'command': ['shutdown', {'pid': 42}]},
+ init.cc_session.msg)
+
+# Mock class for testing Init's usage of ProcessInfo
+class MockProcessInfo:
+ def __init__(self, name, args, env={}, dev_null_stdout=False,
+ dev_null_stderr=False):
+ self.name = name
+ self.args = args
+ self.env = env
+ self.dev_null_stdout = dev_null_stdout
+ self.dev_null_stderr = dev_null_stderr
+ self.process = None
+ self.pid = None
+
+ def spawn(self):
+ # set some pid (only used for testing that it is not None anymore)
+ self.pid = 42147
+
+# Class for testing the Init without actually starting processes.
+# This is used for testing the start/stop components routines and
+# the Init commands.
+#
+# Testing that external processes start is outside the scope
+# of the unit test; by overriding the process start methods we can check
+# that the right processes are started depending on the configuration
+# options.
+class MockInit(Init):
+ def __init__(self):
+ Init.__init__(self)
+
+ # Set flags as to which of the overridden methods has been run.
+ self.msgq = False
+ self.cfgmgr = False
+ self.ccsession = False
+ self.auth = False
+ self.resolver = False
+ self.xfrout = False
+ self.xfrin = False
+ self.zonemgr = False
+ self.stats = False
+ self.stats_httpd = False
+ self.cmdctl = False
+ self.dhcp6 = False
+ self.dhcp4 = False
+ self.c_channel_env = {}
+ self.components = { }
+ self.creator = False
+ self.get_process_exit_status_called = False
+
+ class MockSockCreator(isc.bind10.component.Component):
+ def __init__(self, process, b10_init, kind, address=None,
+ params=None):
+ isc.bind10.component.Component.__init__(self, process,
+ b10_init, kind,
+ 'SockCreator')
+ self._start_func = b10_init.start_creator
+
+ specials = isc.bind10.special_component.get_specials()
+ specials['sockcreator'] = MockSockCreator
+ self._component_configurator = \
+ isc.bind10.component.Configurator(self, specials)
+
+ def start_creator(self):
+ self.creator = True
+ procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
+ procinfo.pid = 1
+ return procinfo
+
+ def _read_bind10_config(self):
+ # Configuration options are set directly
+ pass
+
+ def start_msgq(self):
+ self.msgq = True
+ procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
+ procinfo.pid = 2
+ return procinfo
+
+ def start_ccsession(self, c_channel_env):
+ # this is not a process, so we don't have to do anything with procinfo
+ self.ccsession = True
+
+ def start_cfgmgr(self):
+ self.cfgmgr = True
+ procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
+ procinfo.pid = 3
+ return procinfo
+
+ def start_auth(self):
+ self.auth = True
+ procinfo = ProcessInfo('b10-auth', ['/bin/false'])
+ procinfo.pid = 5
+ return procinfo
+
+ def start_resolver(self):
+ self.resolver = True
+ procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
+ procinfo.pid = 6
+ return procinfo
+
+ def start_simple(self, name):
+ procmap = { 'b10-zonemgr': self.start_zonemgr,
+ 'b10-stats': self.start_stats,
+ 'b10-stats-httpd': self.start_stats_httpd,
+ 'b10-cmdctl': self.start_cmdctl,
+ 'b10-dhcp6': self.start_dhcp6,
+ 'b10-dhcp4': self.start_dhcp4,
+ 'b10-xfrin': self.start_xfrin,
+ 'b10-xfrout': self.start_xfrout }
+ return procmap[name]()
+
+ def start_xfrout(self):
+ self.xfrout = True
+ procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
+ procinfo.pid = 7
+ return procinfo
+
+ def start_xfrin(self):
+ self.xfrin = True
+ procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
+ procinfo.pid = 8
+ return procinfo
+
+ def start_zonemgr(self):
+ self.zonemgr = True
+ procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
+ procinfo.pid = 9
+ return procinfo
+
+ def start_stats(self):
+ self.stats = True
+ procinfo = ProcessInfo('b10-stats', ['/bin/false'])
+ procinfo.pid = 10
+ return procinfo
+
+ def start_stats_httpd(self):
+ self.stats_httpd = True
+ procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
+ procinfo.pid = 11
+ return procinfo
+
+ def start_cmdctl(self):
+ self.cmdctl = True
+ procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
+ procinfo.pid = 12
+ return procinfo
+
+ def start_dhcp6(self):
+ self.dhcp6 = True
+ procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
+ procinfo.pid = 13
+ return procinfo
+
+ def start_dhcp4(self):
+ self.dhcp4 = True
+ procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
+ procinfo.pid = 14
+ return procinfo
+
+ def stop_process(self, process, recipient, pid):
+ procmap = { 'b10-auth': self.stop_auth,
+ 'b10-resolver': self.stop_resolver,
+ 'b10-xfrout': self.stop_xfrout,
+ 'b10-xfrin': self.stop_xfrin,
+ 'b10-zonemgr': self.stop_zonemgr,
+ 'b10-stats': self.stop_stats,
+ 'b10-stats-httpd': self.stop_stats_httpd,
+ 'b10-cmdctl': self.stop_cmdctl }
+ procmap[process]()
+
+ # Some functions to pretend we stop processes, used by stop_process
+ def stop_msgq(self):
+ if self.msgq:
+ del self.components[2]
+ self.msgq = False
+
+ def stop_cfgmgr(self):
+ if self.cfgmgr:
+ del self.components[3]
+ self.cfgmgr = False
+
+ def stop_auth(self):
+ if self.auth:
+ del self.components[5]
+ self.auth = False
+
+ def stop_resolver(self):
+ if self.resolver:
+ del self.components[6]
+ self.resolver = False
+
+ def stop_xfrout(self):
+ if self.xfrout:
+ del self.components[7]
+ self.xfrout = False
+
+ def stop_xfrin(self):
+ if self.xfrin:
+ del self.components[8]
+ self.xfrin = False
+
+ def stop_zonemgr(self):
+ if self.zonemgr:
+ del self.components[9]
+ self.zonemgr = False
+
+ def stop_stats(self):
+ if self.stats:
+ del self.components[10]
+ self.stats = False
+
+ def stop_stats_httpd(self):
+ if self.stats_httpd:
+ del self.components[11]
+ self.stats_httpd = False
+
+ def stop_cmdctl(self):
+ if self.cmdctl:
+ del self.components[12]
+ self.cmdctl = False
+
+ def _get_process_exit_status(self):
+ if self.get_process_exit_status_called:
+ return (0, 0)
+ self.get_process_exit_status_called = True
+ return (53, 0)
+
+ def _get_process_exit_status_unknown_pid(self):
+ if self.get_process_exit_status_called:
+ return (0, 0)
+ self.get_process_exit_status_called = True
+ return (42, 0)
+
+ def _get_process_exit_status_raises_oserror_echild(self):
+ raise OSError(errno.ECHILD, 'Mock error')
+
+ def _get_process_exit_status_raises_oserror_other(self):
+ raise OSError(0, 'Mock error')
+
+ def _get_process_exit_status_raises_other(self):
+ raise Exception('Mock error')
+
+ def _make_mock_process_info(self, name, args, c_channel_env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ return MockProcessInfo(name, args, c_channel_env,
+ dev_null_stdout, dev_null_stderr)
+
+class MockInitSimple(Init):
+ def __init__(self):
+ Init.__init__(self)
+ # Set which process has been started
+ self.started_process_name = None
+ self.started_process_args = None
+ self.started_process_env = None
+
+ def _make_mock_process_info(self, name, args, c_channel_env,
+ dev_null_stdout=False, dev_null_stderr=False):
+ return MockProcessInfo(name, args, c_channel_env,
+ dev_null_stdout, dev_null_stderr)
+
+ def start_process(self, name, args, c_channel_env, port=None,
+ address=None):
+ self.started_process_name = name
+ self.started_process_args = args
+ self.started_process_env = c_channel_env
+ return None
+
+class TestStartStopProcessesInit(unittest.TestCase):
+ """
+ Check that the start_all_components method starts the right combination
+ of components and that the right components are started and stopped
+ according to changes in configuration.
+ """
+ def check_environment_unchanged(self):
+ # Check whether the environment has not been changed
+ self.assertEqual(original_os_environ, os.environ)
+
+ def check_started(self, init, core, auth, resolver):
+ """
+ Check that the right sets of services are started. The ones that
+ should be running are specified by the core, auth and resolver parameters
+ (they are groups of processes; e.g., auth means b10-auth, -xfrout, -xfrin
+ and -zonemgr).
+ """
+ self.assertEqual(init.msgq, core)
+ self.assertEqual(init.cfgmgr, core)
+ self.assertEqual(init.ccsession, core)
+ self.assertEqual(init.creator, core)
+ self.assertEqual(init.auth, auth)
+ self.assertEqual(init.resolver, resolver)
+ self.assertEqual(init.xfrout, auth)
+ self.assertEqual(init.xfrin, auth)
+ self.assertEqual(init.zonemgr, auth)
+ self.assertEqual(init.stats, core)
+ self.assertEqual(init.stats_httpd, core)
+ self.assertEqual(init.cmdctl, core)
+ self.check_environment_unchanged()
+
+ def check_preconditions(self, init):
+ self.check_started(init, False, False, False)
+
+ def check_started_none(self, init):
+ """
+ Check that the state matches a configuration in which no servers
+ should be started. The core components still need to be running.
+ """
+ self.check_started(init, True, False, False)
+ self.check_environment_unchanged()
+
+ def check_started_both(self, init):
+ """
+ Check that the state matches a configuration in which both servers
+ (auth and resolver) are enabled.
+ """
+ self.check_started(init, True, True, True)
+ self.check_environment_unchanged()
+
+ def check_started_auth(self, init):
+ """
+ Check the set of components needed to run auth only is started.
+ """
+ self.check_started(init, True, True, False)
+ self.check_environment_unchanged()
+
+ def check_started_resolver(self, init):
+ """
+ Check the set of components needed to run resolver only is started.
+ """
+ self.check_started(init, True, False, True)
+ self.check_environment_unchanged()
+
+ def check_started_dhcp(self, init, v4, v6):
+ """
+ Check whether the proper combinations of DHCPv4 and DHCPv6 can be started
+ """
+ self.assertEqual(v4, init.dhcp4)
+ self.assertEqual(v6, init.dhcp6)
+ self.check_environment_unchanged()
+
+ def construct_config(self, start_auth, start_resolver):
+ # The components that are common, not turned on and off
+ config = {}
+ config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+ config['b10-stats-httpd'] = { 'kind': 'dispensable',
+ 'address': 'StatsHttpd' }
+ config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+ if start_auth:
+ config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+ config['b10-xfrout'] = { 'kind': 'dispensable',
+ 'address': 'Xfrout' }
+ config['b10-xfrin'] = { 'kind': 'dispensable',
+ 'address': 'Xfrin' }
+ config['b10-zonemgr'] = { 'kind': 'dispensable',
+ 'address': 'Zonemgr' }
+ if start_resolver:
+ config['b10-resolver'] = { 'kind': 'needed',
+ 'special': 'resolver' }
+ return {'components': config}
+
+ def config_start_init(self, start_auth, start_resolver):
+ """
+ Test that the configuration is loaded at startup.
+ """
+ init = MockInit()
+ config = self.construct_config(start_auth, start_resolver)
+ class CC:
+ def get_full_config(self):
+ return config
+ # Provide the fake CC with data
+ init.ccs = CC()
+ # And make sure it's not overwritten
+ def start_ccsession():
+ init.ccsession = True
+ init.start_ccsession = lambda _: start_ccsession()
+ # We need to restore the original _read_bind10_config
+ init._read_bind10_config = lambda: Init._read_bind10_config(init)
+ init.start_all_components()
+ self.check_started(init, True, start_auth, start_resolver)
+ self.check_environment_unchanged()
+
+ def test_start_none(self):
+ self.config_start_init(False, False)
+
+ def test_start_resolver(self):
+ self.config_start_init(False, True)
+
+ def test_start_auth(self):
+ self.config_start_init(True, False)
+
+ def test_start_both(self):
+ self.config_start_init(True, True)
+
+ def test_config_start(self):
+ """
+ Test that components are started and stopped according to
+ configuration changes.
+ """
+
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+
+ init.start_all_components()
+ init.runnable = True
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Enable both at once
+ init.config_handler(self.construct_config(True, True))
+ self.check_started_both(init)
+
+ # Not touched by empty change
+ init.config_handler({})
+ self.check_started_both(init)
+
+ # Not touched by change to the same configuration
+ init.config_handler(self.construct_config(True, True))
+ self.check_started_both(init)
+
+ # Turn them both off again
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Not touched by empty change
+ init.config_handler({})
+ self.check_started_none(init)
+
+ # Not touched by change to the same configuration
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Start and stop auth separately
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Start and stop resolver separately
+ init.config_handler(self.construct_config(False, True))
+ self.check_started_resolver(init)
+
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_none(init)
+
+ # Alternate
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
+ init.config_handler(self.construct_config(False, True))
+ self.check_started_resolver(init)
+
+ init.config_handler(self.construct_config(True, False))
+ self.check_started_auth(init)
+
+ def test_config_start_once(self):
+ """
+ Tests that a component is started only once.
+ """
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+
+ init.start_all_components()
+
+ init.runnable = True
+ init.config_handler(self.construct_config(True, True))
+ self.check_started_both(init)
+
+ init.start_auth = lambda: self.fail("Started auth again")
+ init.start_xfrout = lambda: self.fail("Started xfrout again")
+ init.start_xfrin = lambda: self.fail("Started xfrin again")
+ init.start_zonemgr = lambda: self.fail("Started zonemgr again")
+ init.start_resolver = lambda: self.fail("Started resolver again")
+
+ # Send the same configuration again. The components should not be
+ # started again, as they are already running.
+ init.config_handler(self.construct_config(True, True))
+
+ def test_config_not_started_early(self):
+ """
+ Test that components are not started by the config handler before
+ startup.
+ """
+ init = MockInit()
+ self.check_preconditions(init)
+
+ init.start_auth = lambda: self.fail("Started auth again")
+ init.start_xfrout = lambda: self.fail("Started xfrout again")
+ init.start_xfrin = lambda: self.fail("Started xfrin again")
+ init.start_zonemgr = lambda: self.fail("Started zonemgr again")
+ init.start_resolver = lambda: self.fail("Started resolver again")
+
+ init.config_handler({'start_auth': True, 'start_resolver': True})
+
+ # Checks that DHCP (v4 and v6) components are started when expected
+ def test_start_dhcp(self):
+
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+
+ init.start_all_components()
+ init.config_handler(self.construct_config(False, False))
+ self.check_started_dhcp(init, False, False)
+
+ def test_start_dhcp_v6only(self):
+ # Create Init and ensure correct initialization
+ init = MockInit()
+ self.check_preconditions(init)
+ # v6 only enabled
+ init.start_all_components()
+ init.runnable = True
+ init._Init_started = True
+ config = self.construct_config(False, False)
+ config['components']['b10-dhcp6'] = { 'kind': 'needed',
+ 'address': 'Dhcp6' }
+ init.config_handler(config)
+ self.check_started_dhcp(init, False, True)
+
+ # uncomment when DHCPv4 support is implemented
+ # v4 only enabled
+ #init.cfg_start_dhcp6 = False
+ #init.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(init, True, False)
+
+ # both v4 and v6 enabled
+ #init.cfg_start_dhcp6 = True
+ #init.cfg_start_dhcp4 = True
+ #self.check_started_dhcp(init, True, True)
+
+class MockComponent:
+ def __init__(self, name, pid, address=None):
+ self.name = lambda: name
+ self.pid = lambda: pid
+ self.address = lambda: address
+ self.restarted = False
+ self.forceful = False
+ self.running = True
+ self.has_failed = False
+
+ def get_restart_time(self):
+ return 0 # arbitrary dummy value
+
+ def restart(self, now):
+ self.restarted = True
+ return True
+
+ def is_running(self):
+ return self.running
+
+ def failed(self, status):
+ return self.has_failed
+
+ def kill(self, forceful):
+ self.forceful = forceful
+
+class TestInitCmd(unittest.TestCase):
+ def test_ping(self):
+ """
+ Confirm simple ping command works.
+ """
+ init = MockInit()
+ answer = init.command_handler("ping", None)
+ self.assertEqual(answer, {'result': [0, 'pong']})
+
+ def test_show_processes_empty(self):
+ """
+ Confirm getting a list of processes works.
+ """
+ init = MockInit()
+ answer = init.command_handler("show_processes", None)
+ self.assertEqual(answer, {'result': [0, []]})
+
+ def test_show_processes(self):
+ """
+ Confirm getting a list of processes works.
+ """
+ init = MockInit()
+ init.register_process(1, MockComponent('first', 1))
+ init.register_process(2, MockComponent('second', 2, 'Second'))
+ answer = init.command_handler("show_processes", None)
+ processes = [[1, 'first', None],
+ [2, 'second', 'Second']]
+ self.assertEqual(answer, {'result': [0, processes]})
+
+class TestParseArgs(unittest.TestCase):
+ """
+ This tests parsing of arguments of the bind10 master process.
+ """
+ #TODO: Write tests for the original parsing, bad options, etc.
+ def test_no_opts(self):
+ """
+ Test correct default values when no options are passed.
+ """
+ options = parse_args([], TestOptParser)
+ self.assertEqual(None, options.data_path)
+ self.assertEqual(None, options.config_file)
+ self.assertEqual(None, options.cmdctl_port)
+
+ def test_data_path(self):
+ """
+ Test it can parse the data path.
+ """
+ self.assertRaises(OptsError, parse_args, ['-p'], TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--data-path'],
+ TestOptParser)
+ options = parse_args(['-p', '/data/path'], TestOptParser)
+ self.assertEqual('/data/path', options.data_path)
+ options = parse_args(['--data-path=/data/path'], TestOptParser)
+ self.assertEqual('/data/path', options.data_path)
+
+ def test_config_filename(self):
+ """
+ Test it can parse the config switch.
+ """
+ self.assertRaises(OptsError, parse_args, ['-c'], TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--config-file'],
+ TestOptParser)
+ options = parse_args(['-c', 'config-file'], TestOptParser)
+ self.assertEqual('config-file', options.config_file)
+ options = parse_args(['--config-file=config-file'], TestOptParser)
+ self.assertEqual('config-file', options.config_file)
+
+ def test_clear_config(self):
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.clear_config)
+ options = parse_args(['--clear-config'], TestOptParser)
+ self.assertEqual(True, options.clear_config)
+
+ def test_nokill(self):
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.nokill)
+ options = parse_args(['--no-kill'], TestOptParser)
+ self.assertEqual(True, options.nokill)
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.nokill)
+ options = parse_args(['-i'], TestOptParser)
+ self.assertEqual(True, options.nokill)
+
+ def test_cmdctl_port(self):
+ """
+ Test it can parse the command control port.
+ """
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port=abc'],
+ TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port=100000000'],
+ TestOptParser)
+ self.assertRaises(OptsError, parse_args, ['--cmdctl-port'],
+ TestOptParser)
+ options = parse_args(['--cmdctl-port=1234'], TestOptParser)
+ self.assertEqual(1234, options.cmdctl_port)
+
+class TestPIDFile(unittest.TestCase):
+ def setUp(self):
+ self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
+ if os.path.exists(self.pid_file):
+ os.unlink(self.pid_file)
+
+ def tearDown(self):
+ if os.path.exists(self.pid_file):
+ os.unlink(self.pid_file)
+
+ def check_pid_file(self):
+ # dump PID to the file, and confirm the content is correct
+ dump_pid(self.pid_file)
+ my_pid = os.getpid()
+ with open(self.pid_file, "r") as f:
+ self.assertEqual(my_pid, int(f.read()))
+
+ def test_dump_pid(self):
+ self.check_pid_file()
+
+ # make sure any existing content will be removed
+ with open(self.pid_file, "w") as f:
+ f.write('dummy data\n')
+ self.check_pid_file()
+
+ def test_unlink_pid_file_notexist(self):
+ dummy_data = 'dummy_data\n'
+
+ with open(self.pid_file, "w") as f:
+ f.write(dummy_data)
+
+ unlink_pid_file("no_such_pid_file")
+
+ # the file specified for unlink_pid_file doesn't exist,
+ # and the original content of the file should be intact.
+ with open(self.pid_file, "r") as f:
+ self.assertEqual(dummy_data, f.read())
+
+ def test_dump_pid_with_none(self):
+ # Check the behavior of dump_pid() and unlink_pid_file() with None.
+ # This should be no-op.
+ dump_pid(None)
+ self.assertFalse(os.path.exists(self.pid_file))
+
+ dummy_data = 'dummy_data\n'
+
+ with open(self.pid_file, "w") as f:
+ f.write(dummy_data)
+
+ unlink_pid_file(None)
+
+ with open(self.pid_file, "r") as f:
+ self.assertEqual(dummy_data, f.read())
+
+ def test_dump_pid_failure(self):
+ # the attempt to open the file will fail, which should result in an exception.
+ self.assertRaises(IOError, dump_pid,
+ 'nonexistent_dir' + os.sep + 'bind10.pid')
+
+class TestInitComponents(unittest.TestCase):
+ """
+ Test that b10-init propagates the component configuration properly to
+ the component configurator and behaves sanely.
+ """
+ def setUp(self):
+ self.__param = None
+ self.__called = False
+ self.__compconfig = {
+ 'comp': {
+ 'kind': 'needed',
+ 'process': 'cat'
+ }
+ }
+ self._tmp_time = None
+ self._tmp_sleep = None
+ self._tmp_module_cc_session = None
+ self._tmp_cc_session = None
+
+ def tearDown(self):
+ if self._tmp_time is not None:
+ time.time = self._tmp_time
+ if self._tmp_sleep is not None:
+ time.sleep = self._tmp_sleep
+ if self._tmp_module_cc_session is not None:
+ isc.config.ModuleCCSession = self._tmp_module_cc_session
+ if self._tmp_cc_session is not None:
+ isc.cc.Session = self._tmp_cc_session
+
+ def __unary_hook(self, param):
+ """
+ A hook function that stores the parameter for later examination.
+ """
+ self.__param = param
+
+ def __nullary_hook(self):
+ """
+ A hook function that notes down it was called.
+ """
+ self.__called = True
+
+ def __check_core(self, config):
+ """
+ A function checking that the config contains parts for the valid
+ core component configuration.
+ """
+ self.assertIsNotNone(config)
+ for component in ['sockcreator', 'msgq', 'cfgmgr']:
+ self.assertTrue(component in config)
+ self.assertEqual(component, config[component]['special'])
+ self.assertEqual('core', config[component]['kind'])
+
+ def __check_extended(self, config):
+ """
+ This checks that the config contains the core and one more component.
+ """
+ self.__check_core(config)
+ self.assertTrue('comp' in config)
+ self.assertEqual('cat', config['comp']['process'])
+ self.assertEqual('needed', config['comp']['kind'])
+ self.assertEqual(4, len(config))
+
+ def test_correct_run(self):
+ """
+ Test the usual scenario in which nothing fails: we just start,
+ reconfigure and then stop peacefully.
+ """
+ init = MockInit()
+ # Start it
+ orig = init._component_configurator.startup
+ init._component_configurator.startup = self.__unary_hook
+ init.start_all_components()
+ init._component_configurator.startup = orig
+ self.__check_core(self.__param)
+ self.assertEqual(3, len(self.__param))
+
+ # Reconfigure it
+ self.__param = None
+ orig = init._component_configurator.reconfigure
+ init._component_configurator.reconfigure = self.__unary_hook
+ # Otherwise the config handler would not do anything
+ init.runnable = True
+ init.config_handler({'components': self.__compconfig})
+ self.__check_extended(self.__param)
+ currconfig = self.__param
+ # If we reconfigure it, but it does not contain the components part,
+ # nothing is called
+ init.config_handler({})
+ self.assertEqual(self.__param, currconfig)
+ self.__param = None
+ init._component_configurator.reconfigure = orig
+ # Check a configuration that messes up the core components is rejected.
+ compconf = dict(self.__compconfig)
+ compconf['msgq'] = { 'process': 'echo' }
+ result = init.config_handler({'components': compconf})
+ # Check it rejected it
+ self.assertEqual(1, result['result'][0])
+
+ # We can't call shutdown here; it relies on things set up in main().
+ # We check elsewhere (test_kills) that shutdown is actually called
+ # from there.
+
+ def __real_test_kill(self, nokill=False, ex_on_kill=None):
+ """
+ Helper function that does the actual kill functionality testing.
+ """
+ init = MockInit()
+ init.nokill = nokill
+
+ killed = []
+ class ImmortalComponent:
+ """
+ An immortal component. It does not stop when asked to (not that
+ it ever is). It does not die the first time it is killed; it dies
+ only when killed forcefully.
+ """
+ def __init__(self):
+ # number of kill() calls, preventing infinite loop.
+ self.__call_count = 0
+
+ def kill(self, forceful=False):
+ self.__call_count += 1
+ if self.__call_count > 2:
+ raise Exception('Too many calls to ImmortalComponent.kill')
+
+ killed.append(forceful)
+ if ex_on_kill is not None:
+ # If exception is given by the test, raise it here.
+ # In the case of ESRCH, the process should have gone
+ # somehow, so we clear the components.
+ if ex_on_kill.errno == errno.ESRCH:
+ init.components = {}
+ raise ex_on_kill
+ if forceful:
+ init.components = {}
+ def pid(self):
+ return 1
+ def name(self):
+ return "Immortal"
+ init.components = {}
+ init.register_process(1, ImmortalComponent())
+
+ # While at it, we check the configurator shutdown is actually called
+ orig = init._component_configurator.shutdown
+ init._component_configurator.shutdown = self.__nullary_hook
+ self.__called = False
+
+ init.ccs = MockModuleCCSession()
+ self.assertFalse(init.ccs.stopped)
+
+ init.shutdown()
+
+ self.assertTrue(init.ccs.stopped)
+
+ # Here, killed is an array where False is added if SIGTERM
+ # should be sent, or True if SIGKILL should be sent, in the order
+ # in which they're sent.
+ if nokill:
+ self.assertEqual([], killed)
+ else:
+ if ex_on_kill is not None:
+ self.assertEqual([False], killed)
+ else:
+ self.assertEqual([False, True], killed)
+
+ self.assertTrue(self.__called)
+
+ init._component_configurator.shutdown = orig
+
+ def test_kills(self):
+ """
+ Test that b10-init kills components which don't want to stop.
+ """
+ self.__real_test_kill()
+
+ def test_kill_fail(self):
+ """Test cases where kill() results in an exception due to OS error.
+
+ The behavior should be different for EPERM, so we test two cases.
+
+ """
+
+ ex = OSError()
+ ex.errno, ex.strerror = errno.ESRCH, 'No such process'
+ self.__real_test_kill(ex_on_kill=ex)
+
+ ex.errno, ex.strerror = errno.EPERM, 'Operation not permitted'
+ self.__real_test_kill(ex_on_kill=ex)
+
+ def test_nokill(self):
+ """
+ Test that b10-init *doesn't* kill components which don't want to
+ stop, when asked not to (by passing the --no-kill option which
+ sets init.nokill to True).
+ """
+ self.__real_test_kill(True)
+
+ def test_component_shutdown(self):
+ """
+ Test that component_shutdown sets all variables accordingly.
+ """
+ init = MockInit()
+ self.assertRaises(Exception, init.component_shutdown, 1)
+ self.assertEqual(1, init.exitcode)
+ init._Init__started = True
+ init.component_shutdown(2)
+ self.assertEqual(2, init.exitcode)
+ self.assertFalse(init.runnable)
+
+ def test_init_config(self):
+ """
+ Test initial configuration is loaded.
+ """
+ init = MockInit()
+ # Start it
+ init._component_configurator.reconfigure = self.__unary_hook
+ # We need to restore the original _read_bind10_config
+ init._read_bind10_config = lambda: Init._read_bind10_config(init)
+ # And provide a session to read the data from
+ class CC:
+ pass
+ init.ccs = CC()
+ init.ccs.get_full_config = lambda: {'components': self.__compconfig}
+ init.start_all_components()
+ self.__check_extended(self.__param)
+
+ def __setup_restart(self, init, component):
+ '''Common procedure for restarting a component used below.'''
+ init.components_to_restart = { component }
+ component.restarted = False
+ init.restart_processes()
+
+ def test_restart_processes(self):
+ '''Check some behavior on restarting processes.'''
+ init = MockInit()
+ init.runnable = True
+ component = MockComponent('test', 53)
+
+ # A component to be restarted will actually be restarted iff it's
+ # in the configurator's configuration.
+ # We poke the configurator's internals directly below; ugly, but the easiest
+ # way for the test.
+ init._component_configurator._components['test'] = (None, component)
+ self.__setup_restart(init, component)
+ self.assertTrue(component.restarted)
+ self.assertNotIn(component, init.components_to_restart)
+
+ # Remove the component from the configuration. It won't be restarted
+ # even if scheduled, nor will it remain in the to-be-restarted list.
+ del init._component_configurator._components['test']
+ self.__setup_restart(init, component)
+ self.assertFalse(component.restarted)
+ self.assertNotIn(component, init.components_to_restart)
+
+ def test_get_processes(self):
+ '''Test that processes are returned correctly, sorted by pid.'''
+ init = MockInit()
+
+ pids = list(range(0, 20))
+ random.shuffle(pids)
+
+ for i in range(0, 20):
+ pid = pids[i]
+ component = MockComponent('test' + str(pid), pid,
+ 'Test' + str(pid))
+ init.components[pid] = component
+
+ process_list = init.get_processes()
+ self.assertEqual(20, len(process_list))
+
+ last_pid = -1
+ for process in process_list:
+ pid = process[0]
+ self.assertLessEqual(last_pid, pid)
+ last_pid = pid
+ self.assertEqual([pid, 'test' + str(pid), 'Test' + str(pid)],
+ process)
+
+ def _test_reap_children_helper(self, runnable, is_running, failed):
+ '''Construct an Init instance, set various data in it according to
+ passed args and check if the component was added to the list of
+ components to restart.'''
+ init = MockInit()
+ init.runnable = runnable
+
+ component = MockComponent('test', 53)
+ component.running = is_running
+ component.has_failed = failed
+ init.components[53] = component
+
+ self.assertNotIn(component, init.components_to_restart)
+
+ init.reap_children()
+
+ if runnable and is_running and not failed:
+ self.assertIn(component, init.components_to_restart)
+ else:
+ self.assertEqual([], init.components_to_restart)
+
+ def test_reap_children(self):
+ '''Test that children are queued to be restarted when they ask for it.'''
+ # test various combinations of 3 booleans
+ # (Init.runnable, component.is_running(), component.failed())
+ self._test_reap_children_helper(False, False, False)
+ self._test_reap_children_helper(False, False, True)
+ self._test_reap_children_helper(False, True, False)
+ self._test_reap_children_helper(False, True, True)
+ self._test_reap_children_helper(True, False, False)
+ self._test_reap_children_helper(True, False, True)
+ self._test_reap_children_helper(True, True, False)
+ self._test_reap_children_helper(True, True, True)
+
+ # setup for more tests below
+ init = MockInit()
+ init.runnable = True
+ component = MockComponent('test', 53)
+ init.components[53] = component
+
+ # case where the returned pid is unknown to us. nothing should
+ # happen then.
+ init.get_process_exit_status_called = False
+ init._get_process_exit_status = init._get_process_exit_status_unknown_pid
+ init.components_to_restart = []
+ # this should do nothing as the pid is unknown
+ init.reap_children()
+ self.assertEqual([], init.components_to_restart)
+
+ # case where init._get_process_exit_status() raises OSError with
+ # errno.ECHILD
+ init._get_process_exit_status = \
+ init._get_process_exit_status_raises_oserror_echild
+ init.components_to_restart = []
+ # this should catch and handle the OSError
+ init.reap_children()
+ self.assertEqual([], init.components_to_restart)
+
+ # case where init._get_process_exit_status() raises OSError with
+ # errno other than ECHILD
+ init._get_process_exit_status = \
+ init._get_process_exit_status_raises_oserror_other
+ with self.assertRaises(OSError):
+ init.reap_children()
+
+ # case where init._get_process_exit_status() raises something
+ # other than OSError
+ init._get_process_exit_status = \
+ init._get_process_exit_status_raises_other
+ with self.assertRaises(Exception):
+ init.reap_children()
+
+ def test_kill_started_components(self):
+ '''Test that started components are killed.'''
+ init = MockInit()
+
+ component = MockComponent('test', 53, 'Test')
+ init.components[53] = component
+
+ self.assertEqual([[53, 'test', 'Test']], init.get_processes())
+ init.kill_started_components()
+ self.assertEqual([], init.get_processes())
+ self.assertTrue(component.forceful)
+
+ def _start_msgq_helper(self, init, verbose):
+ init.verbose = verbose
+ pi = init.start_msgq()
+ self.assertEqual('b10-msgq', pi.name)
+ self.assertEqual(['b10-msgq'], pi.args)
+ self.assertTrue(pi.dev_null_stdout)
+ self.assertEqual(pi.dev_null_stderr, not verbose)
+ self.assertEqual({'FOO': 'an env string'}, pi.env)
+
+ # this is set by ProcessInfo.spawn()
+ self.assertEqual(42147, pi.pid)
+
+ def test_start_msgq(self):
+ '''Test that b10-msgq is started.'''
+ init = MockInitSimple()
+ init.c_channel_env = {'FOO': 'an env string'}
+ init._run_under_unittests = True
+
+ # use the MockProcessInfo creator
+ init._make_process_info = init._make_mock_process_info
+
+ # non-verbose case
+ self._start_msgq_helper(init, False)
+
+ # verbose case
+ self._start_msgq_helper(init, True)
+
+ def test_start_msgq_timeout(self):
+ '''Test that b10-msgq startup attempts connections several times
+ and times out eventually.'''
+ b10_init = MockInitSimple()
+ b10_init.c_channel_env = {}
+ # set the timeout to an arbitrary pre-determined value (which
+ # code below depends on)
+ b10_init.msgq_timeout = 1
+ b10_init._run_under_unittests = False
+
+ # use the MockProcessInfo creator
+ b10_init._make_process_info = b10_init._make_mock_process_info
+
+ global attempts
+ global tsec
+ attempts = 0
+ tsec = 0
+ self._tmp_time = time.time
+ self._tmp_sleep = time.sleep
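+ # Replace time.time() and time.sleep() with counters: each time() call
+ # counts as one attempt and each sleep() just advances the fake clock,
+ # so the retry loop runs instantly.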
+ def _my_time():
+ global attempts
+ global tsec
+ attempts += 1
+ return tsec
+ def _my_sleep(nsec):
+ global tsec
+ tsec += nsec
+ time.time = _my_time
+ time.sleep = _my_sleep
+
+ global cc_sub
+ cc_sub = None
+ class DummySessionAlwaysFails():
+ def __init__(self, socket_file):
+ raise isc.cc.session.SessionError('Connection fails')
+ def group_subscribe(self, s):
+ global cc_sub
+ cc_sub = s
+
+ isc.cc.Session = DummySessionAlwaysFails
+
+ with self.assertRaises(init.CChannelConnectError):
+ # An exception will be thrown here when it eventually times
+ # out.
+ pi = b10_init.start_msgq()
+
+ # time.time() should be called 12 times within the while loop:
+ # starting from 0, and 11 more times from 0.1 to 1.1. There's
+ # another call to time.time() outside the loop, which makes it
+ # 13.
+ self.assertEqual(attempts, 13)
+
+ # group_subscribe() should not have been called here.
+ self.assertIsNone(cc_sub)
+
+ global cc_socket_file
+ cc_socket_file = None
+ cc_sub = None
+ class DummySession():
+ def __init__(self, socket_file):
+ global cc_socket_file
+ cc_socket_file = socket_file
+ def group_subscribe(self, s):
+ global cc_sub
+ cc_sub = s
+
+ isc.cc.Session = DummySession
+
+ # reset values
+ attempts = 0
+ tsec = 0
+
+ pi = b10_init.start_msgq()
+
+ # just one attempt, but 2 calls to time.time()
+ self.assertEqual(attempts, 2)
+
+ self.assertEqual(cc_socket_file, b10_init.msgq_socket_file)
+ self.assertEqual(cc_sub, 'Init')
+
+ # isc.cc.Session, time.time() and time.sleep() are restored
+ # during tearDown().
+
+ def _start_cfgmgr_helper(self, init, data_path, filename, clear_config):
+ expect_args = ['b10-cfgmgr']
+ if data_path is not None:
+ init.data_path = data_path
+ expect_args.append('--data-path=' + data_path)
+ if filename is not None:
+ init.config_filename = filename
+ expect_args.append('--config-filename=' + filename)
+ if clear_config:
+ init.clear_config = clear_config
+ expect_args.append('--clear-config')
+
+ pi = init.start_cfgmgr()
+ self.assertEqual('b10-cfgmgr', pi.name)
+ self.assertEqual(expect_args, pi.args)
+ self.assertEqual({'TESTENV': 'A test string'}, pi.env)
+
+ # this is set by ProcessInfo.spawn()
+ self.assertEqual(42147, pi.pid)
+
+ def test_start_cfgmgr(self):
+ '''Test that b10-cfgmgr is started.'''
+ class DummySession():
+ def __init__(self):
+ self._tries = 0
+ def group_recvmsg(self):
+ self._tries += 1
+ # return running on the 3rd try onwards
+ if self._tries >= 3:
+ return ({'running': 'ConfigManager'}, None)
+ else:
+ return ({}, None)
+
+ init = MockInitSimple()
+ init.c_channel_env = {'TESTENV': 'A test string'}
+ init.cc_session = DummySession()
+ init.wait_time = 5
+
+ # use the MockProcessInfo creator
+ init._make_process_info = init._make_mock_process_info
+
+ global attempts
+ attempts = 0
+ self._tmp_sleep = time.sleep
+ def _my_sleep(nsec):
+ global attempts
+ attempts += 1
+ time.sleep = _my_sleep
+
+ # defaults
+ self._start_cfgmgr_helper(init, None, None, False)
+
+ # check that 2 attempts were made; on the 3rd attempt,
+ # process_running() reports that ConfigManager is running.
+ self.assertEqual(attempts, 2)
+
+ # data_path is specified
+ self._start_cfgmgr_helper(init, '/var/lib/test', None, False)
+
+ # config_filename is specified. Because `init` is not
+ # reconstructed, data_path is retained from the last call to
+ # _start_cfgmgr_helper().
+ self._start_cfgmgr_helper(init, '/var/lib/test', 'foo.cfg', False)
+
+ # clear_config is specified. Because `init` is not reconstructed,
+ # data_path and config_filename are retained from the last call
+ # to _start_cfgmgr_helper().
+ self._start_cfgmgr_helper(init, '/var/lib/test', 'foo.cfg', True)
+
+ def test_start_cfgmgr_timeout(self):
+ '''Test that b10-cfgmgr startup attempts connections several times
+ and times out eventually.'''
+ class DummySession():
+ def group_recvmsg(self):
+ return (None, None)
+ b10_init = MockInitSimple()
+ b10_init.c_channel_env = {}
+ b10_init.cc_session = DummySession()
+ # set wait_time to an arbitrary pre-determined value (which code
+ # below depends on)
+ b10_init.wait_time = 2
+
+ # use the MockProcessInfo creator
+ b10_init._make_process_info = b10_init._make_mock_process_info
+
+ global attempts
+ attempts = 0
+ self._tmp_sleep = time.sleep
+ def _my_sleep(nsec):
+ global attempts
+ attempts += 1
+ time.sleep = _my_sleep
+
+ # We just check that an exception was thrown, and that several
+ # attempts were made to connect.
+ with self.assertRaises(init.ProcessStartError):
+ pi = b10_init.start_cfgmgr()
+
+ # 2 seconds of attempts every 1 second should result in 2 attempts
+ self.assertEqual(attempts, 2)
+
+ # time.sleep() is restored during tearDown().
+
+ def test_start_ccsession(self):
+ '''Test that CC session is started.'''
+ class DummySession():
+ def __init__(self, specfile, config_handler, command_handler,
+ socket_file):
+ self.specfile = specfile
+ self.config_handler = config_handler
+ self.command_handler = command_handler
+ self.socket_file = socket_file
+ self.started = False
+ def start(self):
+ self.started = True
+ b10_init = MockInitSimple()
+ self._tmp_module_cc_session = isc.config.ModuleCCSession
+ isc.config.ModuleCCSession = DummySession
+
+ b10_init.start_ccsession({})
+ self.assertEqual(init.SPECFILE_LOCATION, b10_init.ccs.specfile)
+ self.assertEqual(b10_init.config_handler, b10_init.ccs.config_handler)
+ self.assertEqual(b10_init.command_handler,
+ b10_init.ccs.command_handler)
+ self.assertEqual(b10_init.msgq_socket_file, b10_init.ccs.socket_file)
+ self.assertTrue(b10_init.ccs.started)
+
+ # isc.config.ModuleCCSession is restored during tearDown().
+
+ def test_start_process(self):
+ '''Test that processes can be started.'''
+ init = MockInit()
+
+ # use the MockProcessInfo creator
+ init._make_process_info = init._make_mock_process_info
+
+ pi = init.start_process('Test Process', ['/bin/true'], {})
+ self.assertEqual('Test Process', pi.name)
+ self.assertEqual(['/bin/true'], pi.args)
+ self.assertEqual({}, pi.env)
+
+ # this is set by ProcessInfo.spawn()
+ self.assertEqual(42147, pi.pid)
+
+ def test_register_process(self):
+ '''Test that processes can be registered with Init.'''
+ init = MockInit()
+ component = MockComponent('test', 53, 'Test')
+
+ self.assertFalse(53 in init.components)
+ init.register_process(53, component)
+ self.assertTrue(53 in init.components)
+ self.assertEqual(init.components[53].name(), 'test')
+ self.assertEqual(init.components[53].pid(), 53)
+ self.assertEqual(init.components[53].address(), 'Test')
+
+ def _start_simple_helper(self, init, verbose):
+ init.verbose = verbose
+
+ args = ['/bin/true']
+ if verbose:
+ args.append('-v')
+
+ init.start_simple('/bin/true')
+ self.assertEqual('/bin/true', init.started_process_name)
+ self.assertEqual(args, init.started_process_args)
+ self.assertEqual({'TESTENV': 'A test string'}, init.started_process_env)
+
+ def test_start_simple(self):
+ '''Test simple process startup.'''
+ init = MockInitSimple()
+ init.c_channel_env = {'TESTENV': 'A test string'}
+
+ # non-verbose case
+ self._start_simple_helper(init, False)
+
+ # verbose case
+ self._start_simple_helper(init, True)
+
+ def _start_auth_helper(self, init, verbose):
+ init.verbose = verbose
+
+ args = ['b10-auth']
+ if verbose:
+ args.append('-v')
+
+ init.start_auth()
+ self.assertEqual('b10-auth', init.started_process_name)
+ self.assertEqual(args, init.started_process_args)
+ self.assertEqual({'FOO': 'an env string'}, init.started_process_env)
+
+ def test_start_auth(self):
+ '''Test that b10-auth is started.'''
+ init = MockInitSimple()
+ init.c_channel_env = {'FOO': 'an env string'}
+
+ # non-verbose case
+ self._start_auth_helper(init, False)
+
+ # verbose case
+ self._start_auth_helper(init, True)
+
+ def _start_resolver_helper(self, init, verbose):
+ init.verbose = verbose
+
+ args = ['b10-resolver']
+ if verbose:
+ args.append('-v')
+
+ init.start_resolver()
+ self.assertEqual('b10-resolver', init.started_process_name)
+ self.assertEqual(args, init.started_process_args)
+ self.assertEqual({'BAR': 'an env string'}, init.started_process_env)
+
+ def test_start_resolver(self):
+ '''Test that b10-resolver is started.'''
+ init = MockInitSimple()
+ init.c_channel_env = {'BAR': 'an env string'}
+
+ # non-verbose case
+ self._start_resolver_helper(init, False)
+
+ # verbose case
+ self._start_resolver_helper(init, True)
+
+ def _start_cmdctl_helper(self, init, verbose, port = None):
+ init.verbose = verbose
+
+ args = ['b10-cmdctl']
+
+ if port is not None:
+ init.cmdctl_port = port
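+ # the tests below always pass 9353, matching the hard-coded
+ # expectation here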
+ args.append('--port=9353')
+
+ if verbose:
+ args.append('-v')
+
+ init.start_cmdctl()
+ self.assertEqual('b10-cmdctl', init.started_process_name)
+ self.assertEqual(args, init.started_process_args)
+ self.assertEqual({'BAZ': 'an env string'}, init.started_process_env)
+
+ def test_start_cmdctl(self):
+ '''Test that b10-cmdctl is started.'''
+ init = MockInitSimple()
+ init.c_channel_env = {'BAZ': 'an env string'}
+
+ # non-verbose case
+ self._start_cmdctl_helper(init, False)
+
+ # verbose case
+ self._start_cmdctl_helper(init, True)
+
+ # with port, non-verbose case
+ self._start_cmdctl_helper(init, False, 9353)
+
+ # with port, verbose case
+ self._start_cmdctl_helper(init, True, 9353)
+
+ def test_socket_data(self):
+ '''Test that Init._socket_data works as expected.'''
+ class MockSock:
+ def __init__(self, fd, throw):
+ self.fd = fd
+ self.throw = throw
+ self.buf = b'Hello World.\nYou are so nice today.\nXX'
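+ # two complete newline-terminated requests followed by a trailing
+ # partial read ('XX')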
+ self.i = 0
+
+ def recv(self, bufsize, flags = 0):
+ if bufsize != 1:
+ raise Exception('bufsize != 1')
+ if flags != socket.MSG_DONTWAIT:
+ raise Exception('flags != socket.MSG_DONTWAIT')
+ # after 15 recv()s, throw a socket.error with EAGAIN to
+ # get _socket_data() to save back what's been read. The
+ # number 15 is arbitrarily chosen, but the checks then
+ # depend on this being 15, i.e., if you adjust this
+ # number, you may have to adjust the checks below too.
+ if self.throw and self.i > 15:
+ raise socket.error(errno.EAGAIN, 'Try again')
+ if self.i >= len(self.buf):
+ return b'';
+ t = self.i
+ self.i += 1
+ return self.buf[t:t+1]
+
+ def close(self):
+ return
+
+ class MockInitSocketData(Init):
+ def __init__(self, throw):
+ self._unix_sockets = {42: (MockSock(42, throw), b'')}
+ self.requests = []
+ self.dead = []
+
+ def socket_request_handler(self, previous, sock):
+ self.requests.append({sock.fd: previous})
+
+ def socket_consumer_dead(self, sock):
+ self.dead.append(sock.fd)
+
+ # Case where we get data every time we call recv()
+ init = MockInitSocketData(False)
+ init._socket_data(42)
+ self.assertEqual(init.requests,
+ [{42: b'Hello World.'},
+ {42: b'You are so nice today.'}])
+ self.assertEqual(init.dead, [42])
+ self.assertEqual({}, init._unix_sockets)
+
+ # Case where socket.recv() raises EAGAIN. In this case, the
+ # routine is supposed to save what it has back to
+ # Init._unix_sockets.
+ init = MockInitSocketData(True)
+ init._socket_data(42)
+ self.assertEqual(init.requests, [{42: b'Hello World.'}])
+ self.assertFalse(init.dead)
+ self.assertEqual(len(init._unix_sockets), 1)
+ self.assertEqual(init._unix_sockets[42][1], b'You')
+
+ def test_startup(self):
+ '''Test that Init.startup() handles failures properly.'''
+ class MockInitStartup(Init):
+ def __init__(self, throw):
+ self.throw = throw
+ self.started = False
+ self.killed = False
+ self.msgq_socket_file = None
+ self.curproc = 'myproc'
+ self.runnable = False
+
+ def start_all_components(self):
+ self.started = True
+ if self.throw is True:
+ raise Exception('Assume starting components has failed.')
+ elif self.throw:
+ raise self.throw
+
+ def kill_started_components(self):
+ self.killed = True
+
+ class DummySession():
+ def __init__(self, socket_file):
+ raise isc.cc.session.SessionError('This is the expected case.')
+
+ class DummySessionSocketExists():
+ def __init__(self, socket_file):
+ # simulate that connect passes
+ return
+
+ isc.cc.Session = DummySession
+
+ # All is well case, where all components are started
+ # successfully. We check that the actual call to
+ # start_all_components() is made, and Init.runnable is true.
+ b10_init = MockInitStartup(False)
+ r = b10_init.startup()
+ self.assertIsNone(r)
+ self.assertTrue(b10_init.started)
+ self.assertFalse(b10_init.killed)
+ self.assertTrue(b10_init.runnable)
+ self.assertEqual({}, b10_init.c_channel_env)
+
+ # Case where starting components fails. We check that
+ # kill_started_components() is called right after, and
+ # Init.runnable is not modified.
+ b10_init = MockInitStartup(True)
+ r = b10_init.startup()
+ # r contains an error message
+ self.assertEqual(r, 'Unable to start myproc: Assume starting components has failed.')
+ self.assertTrue(b10_init.started)
+ self.assertTrue(b10_init.killed)
+ self.assertFalse(b10_init.runnable)
+ self.assertEqual({}, b10_init.c_channel_env)
+
+ # Check if msgq_socket_file is carried over
+ b10_init = MockInitStartup(False)
+ b10_init.msgq_socket_file = 'foo'
+ r = b10_init.startup()
+ self.assertEqual({'BIND10_MSGQ_SOCKET_FILE': 'foo'},
+ b10_init.c_channel_env)
+
+ # Check that failure to change the user results in a different message
+ b10_init = MockInitStartup(init.ChangeUserError('failed to chusr'))
+ r = b10_init.startup()
+ self.assertIn('failed to chusr', r)
+ self.assertTrue(b10_init.killed)
+
+ # Check the case when socket file already exists
+ isc.cc.Session = DummySessionSocketExists
+ b10_init = MockInitStartup(False)
+ r = b10_init.startup()
+ self.assertIn('already running', r)
+
+ # isc.cc.Session is restored during tearDown().
+
+class SocketSrvTest(unittest.TestCase):
+ """
+ This tests some methods of b10-init related to the unix domain sockets
+ used to transfer other sockets to applications.
+ """
+ def setUp(self):
+ """
+ Create the b10-init to test, test data, and back up some functions.
+ """
+ self.__b10_init = Init()
+ self.__select_backup = init.select.select
+ self.__select_called = None
+ self.__socket_data_called = None
+ self.__consumer_dead_called = None
+ self.__socket_request_handler_called = None
+
+ def tearDown(self):
+ """
+ Restore functions.
+ """
+ init.select.select = self.__select_backup
+
+ class __FalseSocket:
+ """
+ A mock socket for select, accept and similar operations.
+ """
+ def __init__(self, owner, fileno=42):
+ self.__owner = owner
+ self.__fileno = fileno
+ self.data = None
+ self.closed = False
+
+ def fileno(self):
+ return self.__fileno
+
+ def accept(self):
+ return (self.__class__(self.__owner, 13), "/path/to/socket")
+
+ def recv(self, bufsize, flags=0):
+ self.__owner.assertEqual(1, bufsize)
+ self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+ if isinstance(self.data, socket.error):
+ raise self.data
+ elif self.data is not None:
+ if len(self.data):
+ result = self.data[0:1]
+ self.data = self.data[1:]
+ return result
+ else:
+ raise socket.error(errno.EAGAIN, "Would block")
+ else:
+ return b''
+
+ def close(self):
+ self.closed = True
+
+ class __CCS:
+ """
+ A mock CCS, just to provide the socket file number.
+ """
+ class __Socket:
+ def fileno(self):
+ return 1
+ def get_socket(self):
+ return self.__Socket()
+
+ def __select_accept(self, r, w, x, t):
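+ # pretend only the listening socket (fileno 42) is readable, so
+ # run() takes the accept path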
+ self.__select_called = (r, w, x, t)
+ return ([42], [], [])
+
+ def __select_data(self, r, w, x, t):
+ self.__select_called = (r, w, x, t)
+ return ([13], [], [])
+
+ def __accept(self):
+ """
+ Hijack the _srv_accept method of b10-init.
+
+ Notes down that it was called and stops b10-init.
+ """
+ self.__accept_called = True
+ self.__b10_init.runnable = False
+
+ def test_srv_accept_called(self):
+ """
+ Test that the _srv_accept method of b10-init is called when the
+ listening socket is readable.
+ """
+ self.__b10_init.runnable = True
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._srv_accept = self.__accept
+ self.__b10_init.ccs = self.__CCS()
+ init.select.select = self.__select_accept
+ self.__b10_init.run(2)
+ # It called the accept
+ self.assertTrue(self.__accept_called)
+ # And the select had the right parameters
+ self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+ def test_srv_accept(self):
+ """
+ Test how the _srv_accept method works.
+ """
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._srv_accept()
+ # After we accepted, a new socket is added there
+ socket = self.__b10_init._unix_sockets[13][0]
+ # The socket is properly stored there
+ self.assertTrue(isinstance(socket, self.__FalseSocket))
+ # And the buffer (yet empty) is there
+ self.assertEqual({13: (socket, b'')}, self.__b10_init._unix_sockets)
+
+ def __socket_data(self, socket):
+ self.__b10_init.runnable = False
+ self.__socket_data_called = socket
+
+ def test_socket_data(self):
+ """
+ Test that a socket that wants attention gets it.
+ """
+ self.__b10_init._srv_socket = self.__FalseSocket(self)
+ self.__b10_init._socket_data = self.__socket_data
+ self.__b10_init.ccs = self.__CCS()
+ self.__b10_init._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+ self.__b10_init.runnable = True
+ init.select.select = self.__select_data
+ self.__b10_init.run(2)
+ self.assertEqual(13, self.__socket_data_called)
+ self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+ def __prepare_data(self, data):
+ socket = self.__FalseSocket(self, 13)
+ self.__b10_init._unix_sockets = {13: (socket, b'')}
+ socket.data = data
+ self.__b10_init.socket_consumer_dead = self.__consumer_dead
+ self.__b10_init.socket_request_handler = self.__socket_request_handler
+ return socket
+
+ def __consumer_dead(self, socket):
+ self.__consumer_dead_called = socket
+
+ def __socket_request_handler(self, token, socket):
+ self.__socket_request_handler_called = (token, socket)
+
+ def test_socket_closed(self):
+ """
+ Test that a socket is removed and socket_consumer_dead is called
+ when it is closed.
+ """
+ socket = self.__prepare_data(None)
+ self.__b10_init._socket_data(13)
+ self.assertEqual(socket, self.__consumer_dead_called)
+ self.assertEqual({}, self.__b10_init._unix_sockets)
+ self.assertTrue(socket.closed)
+
+ def test_socket_short(self):
+ """
+ Test that if there's not enough data to read the whole token, it is
+ kept buffered, but nothing is called.
+ """
+ socket = self.__prepare_data(b'tok')
+ self.__b10_init._socket_data(13)
+ self.assertEqual({13: (socket, b'tok')}, self.__b10_init._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertIsNone(self.__socket_request_handler_called)
+
+ def test_socket_continue(self):
+ """
+ Test that we call the token handling function when the whole token
+ comes. This test pretends to continue reading where the previous one
+ stopped.
+ """
+ socket = self.__prepare_data(b"en\nanothe")
+ # The data to finish
+ self.__b10_init._unix_sockets[13] = (socket, b'tok')
+ self.__b10_init._socket_data(13)
+ self.assertEqual({13: (socket, b'anothe')}, self.__b10_init._unix_sockets)
+ self.assertFalse(socket.closed)
+ self.assertIsNone(self.__consumer_dead_called)
+ self.assertEqual((b'token', socket),
+ self.__socket_request_handler_called)
+
+ def test_broken_socket(self):
+ """
+ If the socket raises an exception other than EAGAIN during the read,
+ it is considered broken and we remove it.
+ """
+ sock = self.__prepare_data(socket.error(errno.ENOMEM,
+ "There's more memory available, but not for you"))
+ self.__b10_init._socket_data(13)
+ self.assertEqual(sock, self.__consumer_dead_called)
+ self.assertEqual({}, self.__b10_init._unix_sockets)
+ self.assertTrue(sock.closed)
+
+class TestFunctions(unittest.TestCase):
+ def setUp(self):
+ self.lockfile_testpath = \
+ "@abs_top_builddir@/src/bin/bind10/tests/lockfile_test"
+ self.assertFalse(os.path.exists(self.lockfile_testpath))
+ os.mkdir(self.lockfile_testpath)
+ self.assertTrue(os.path.isdir(self.lockfile_testpath))
+ self.__isfile_orig = init.os.path.isfile
+ self.__unlink_orig = init.os.unlink
+
+ def tearDown(self):
+ os.rmdir(self.lockfile_testpath)
+ self.assertFalse(os.path.isdir(self.lockfile_testpath))
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = "@abs_top_builddir@"
+ init.os.path.isfile = self.__isfile_orig
+ init.os.unlink = self.__unlink_orig
+
+ def test_remove_lock_files(self):
+ os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] = self.lockfile_testpath
+
+ # create lockfiles for the testcase
+ lockfiles = ["logger_lockfile"]
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.exists(fname))
+ open(fname, "w").close()
+ self.assertTrue(os.path.isfile(fname))
+
+ # first call should clear up all the lockfiles
+ init.remove_lock_files()
+
+ # check that the lockfiles no longer exist
+ for f in lockfiles:
+ fname = os.environ["B10_LOCKFILE_DIR_FROM_BUILD"] + '/' + f
+ self.assertFalse(os.path.isfile(fname))
+
+ # a second call should not fail, even though the lockfiles are gone
+ init.remove_lock_files()
+
+ def test_remove_lock_files_fail(self):
+ # Permission error on unlink is ignored; other exceptions are really
+ # unexpected and propagated.
+ def __raising_unlink(unused, ex):
+ raise ex
+
+ init.os.path.isfile = lambda _: True
+ os_error = OSError()
+ init.os.unlink = lambda f: __raising_unlink(f, os_error)
+
+ os_error.errno = errno.EPERM
+ init.remove_lock_files() # no disruption
+
+ os_error.errno = errno.EACCES
+ init.remove_lock_files() # no disruption
+
+ os_error.errno = errno.ENOENT
+ self.assertRaises(OSError, init.remove_lock_files)
+
+ init.os.unlink = lambda f: __raising_unlink(f, Exception('bad'))
+ self.assertRaises(Exception, init.remove_lock_files)
+
+ def test_get_signame(self):
+ # just test with some samples
+ signame = init.get_signame(signal.SIGTERM)
+ self.assertEqual('SIGTERM', signame)
+ signame = init.get_signame(signal.SIGKILL)
+ self.assertEqual('SIGKILL', signame)
+ # 59426 is hopefully an unused signal on most platforms
+ signame = init.get_signame(59426)
+ self.assertEqual('Unknown signal 59426', signame)
+
+ def test_fatal_signal(self):
+ self.assertIsNone(init.b10_init)
+ init.b10_init = Init()
+ init.b10_init.runnable = True
+ init.fatal_signal(signal.SIGTERM, None)
+ # Now, runnable must be False
+ self.assertFalse(init.b10_init.runnable)
+ init.b10_init = None
+
+if __name__ == '__main__':
+ # store os.environ for test_unchanged_environment
+ original_os_environ = copy.deepcopy(os.environ)
+ isc.log.resetUnitTestRootLogger()
+ unittest.main()
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 7c2b2af..f382e2a 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -25,7 +25,7 @@ from bindctl.moduleinfo import *
from bindctl.cmdparse import BindCmdParser
from bindctl import command_sets
from xml.dom import minidom
-import isc
+import isc.config
import isc.cc.data
import http.client
import json
@@ -39,6 +39,7 @@ import csv
import pwd
import getpass
import copy
+import errno
try:
from collections import OrderedDict
@@ -123,6 +124,11 @@ class BindCmdInterpreter(Cmd):
self.csv_file_dir = pwd.getpwnam(getpass.getuser()).pw_dir + \
os.sep + '.bind10' + os.sep
+ def _print(self, *args):
+ '''Simple wrapper around calls to print that can be overridden in
+ unit tests.'''
+ print(*args)
+
def _get_session_id(self):
'''Generate one session id for the connection. '''
rand = os.urandom(16)
@@ -150,19 +156,19 @@ WARNING: Python readline module isn't available, so the command line editor
return 1
self.cmdloop()
- print('\nExit from bindctl')
+ self._print('\nExit from bindctl')
return 0
except FailToLogin as err:
# error already printed when this was raised, ignoring
return 1
except KeyboardInterrupt:
- print('\nExit from bindctl')
+ self._print('\nExit from bindctl')
return 0
except socket.error as err:
- print('Failed to send request, the connection is closed')
+ self._print('Failed to send request, the connection is closed')
return 1
except http.client.CannotSendRequest:
- print('Can not send request, the connection is busy')
+ self._print('Can not send request, the connection is busy')
return 1
def _get_saved_user_info(self, dir, file_name):
@@ -181,7 +187,8 @@ WARNING: Python readline module isn't available, so the command line editor
for row in users_info:
users.append([row[0], row[1]])
except (IOError, IndexError) as err:
- print("Error reading saved username and password from %s%s: %s" % (dir, file_name, err))
+ self._print("Error reading saved username and password "
+ "from %s%s: %s" % (dir, file_name, err))
finally:
if csvfile:
csvfile.close()
@@ -201,12 +208,48 @@ WARNING: Python readline module isn't available, so the command line editor
writer.writerow([username, passwd])
csvfile.close()
except IOError as err:
- print("Error saving user information:", err)
- print("user info file name: %s%s" % (dir, file_name))
+ self._print("Error saving user information:", err)
+ self._print("user info file name: %s%s" % (dir, file_name))
return False
return True
+ def __print_check_ssl_msg(self):
+ self._print("Please check the logs of b10-cmdctl, there may "
+ "be a problem accepting SSL connections, such "
+ "as a permission problem on the server "
+ "certificate file.")
+
+ def _try_login(self, username, password):
+ '''
+ Attempts to log in to cmdctl by sending a POST with
+ the given username and password.
+ On success of the POST (mind, not the login, only the network
+ operation), returns a tuple (response, data).
+ On failure, raises a FailToLogin exception, and prints some
+ information on the failure.
+ This call is essentially 'private', but made 'protected' for
+ easier testing.
+ '''
+ param = {'username': username, 'password' : password}
+ try:
+ response = self.send_POST('/login', param)
+ data = response.read().decode()
+ # return here (will raise error after try block)
+ return (response, data)
+ except ssl.SSLError as err:
+ self._print("SSL error while sending login information: ", err)
+ if err.errno == ssl.SSL_ERROR_EOF:
+ self.__print_check_ssl_msg()
+ except socket.error as err:
+ self._print("Socket error while sending login information: ", err)
+ # An SSL setup error can also bubble up as a plain CONNRESET...
+ # (on some systems it usually does)
+ if err.errno == errno.ECONNRESET:
+ self.__print_check_ssl_msg()
+ pass
+ raise FailToLogin()
+
def login_to_cmdctl(self):
'''Login to cmdctl with the username and password given by
the user. After the login is sucessful, the username and
@@ -217,41 +260,30 @@ WARNING: Python readline module isn't available, so the command line editor
# Look at existing username/password combinations and try to log in
users = self._get_saved_user_info(self.csv_file_dir, CSV_FILE_NAME)
for row in users:
- param = {'username': row[0], 'password' : row[1]}
- try:
- response = self.send_POST('/login', param)
- data = response.read().decode()
- except socket.error as err:
- print("Socket error while sending login information:", err)
- raise FailToLogin()
+ response, data = self._try_login(row[0], row[1])
if response.status == http.client.OK:
# Is interactive?
if sys.stdin.isatty():
- print(data + ' login as ' + row[0])
+ self._print(data + ' login as ' + row[0])
return True
# No valid logins were found, prompt the user for a username/password
count = 0
- print('No stored password file found, please see sections '
+ self._print('No stored password file found, please see sections '
'"Configuration specification for b10-cmdctl" and "bindctl '
'command-line options" of the BIND 10 guide.')
while True:
count = count + 1
if count > 3:
- print("Too many authentication failures")
+ self._print("Too many authentication failures")
return False
username = input("Username: ")
passwd = getpass.getpass()
- param = {'username': username, 'password' : passwd}
- try:
- response = self.send_POST('/login', param)
- data = response.read().decode()
- print(data)
- except socket.error as err:
- print("Socket error while sending login information:", err)
- raise FailToLogin()
+
+ response, data = self._try_login(username, passwd)
+ self._print(data)
if response.status == http.client.OK:
self._save_user_info(username, passwd, self.csv_file_dir,
@@ -449,25 +481,26 @@ WARNING: Python readline module isn't available, so the command line editor
pass
def do_help(self, name):
- print(CONST_BINDCTL_HELP)
+ self._print(CONST_BINDCTL_HELP)
for k in self.modules.values():
n = k.get_name()
if len(n) >= CONST_BINDCTL_HELP_INDENT_WIDTH:
- print(" %s" % n)
- print(textwrap.fill(k.get_desc(),
- initial_indent=" ",
- subsequent_indent=" " +
- " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
- width=70))
+ self._print(" %s" % n)
+ self._print(textwrap.fill(k.get_desc(),
+ initial_indent=" ",
+ subsequent_indent=" " +
+ " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+ width=70))
else:
- print(textwrap.fill("%s%s%s" %
- (k.get_name(),
- " "*(CONST_BINDCTL_HELP_INDENT_WIDTH - len(k.get_name())),
- k.get_desc()),
- initial_indent=" ",
- subsequent_indent=" " +
- " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
- width=70))
+ self._print(textwrap.fill("%s%s%s" %
+ (k.get_name(),
+ " "*(CONST_BINDCTL_HELP_INDENT_WIDTH -
+ len(k.get_name())),
+ k.get_desc()),
+ initial_indent=" ",
+ subsequent_indent=" " +
+ " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
+ width=70))
def onecmd(self, line):
if line == 'EOF' or line.lower() == "quit":
@@ -642,20 +675,20 @@ WARNING: Python readline module isn't available, so the command line editor
self._validate_cmd(cmd)
self._handle_cmd(cmd)
except (IOError, http.client.HTTPException) as err:
- print('Error: ', err)
+ self._print('Error: ', err)
except BindCtlException as err:
- print("Error! ", err)
+ self._print("Error! ", err)
self._print_correct_usage(err)
except isc.cc.data.DataTypeError as err:
- print("Error! ", err)
+ self._print("Error! ", err)
except isc.cc.data.DataTypeError as dte:
- print("Error: " + str(dte))
+ self._print("Error: " + str(dte))
except isc.cc.data.DataNotFoundError as dnfe:
- print("Error: " + str(dnfe))
+ self._print("Error: " + str(dnfe))
except isc.cc.data.DataAlreadyPresentError as dape:
- print("Error: " + str(dape))
+ self._print("Error: " + str(dape))
except KeyError as ke:
- print("Error: missing " + str(ke))
+ self._print("Error: missing " + str(ke))
def _print_correct_usage(self, ept):
if isinstance(ept, CmdUnknownModuleSyntaxError):
@@ -704,7 +737,8 @@ WARNING: Python readline module isn't available, so the command line editor
module_name = identifier.split('/')[1]
if module_name != "" and (self.config_data is None or \
not self.config_data.have_specification(module_name)):
- print("Error: Module '" + module_name + "' unknown or not running")
+ self._print("Error: Module '" + module_name +
+ "' unknown or not running")
return
if cmd.command == "show":
@@ -718,7 +752,9 @@ WARNING: Python readline module isn't available, so the command line editor
#identifier
identifier += cmd.params['argument']
else:
- print("Error: unknown argument " + cmd.params['argument'] + ", or multiple identifiers given")
+ self._print("Error: unknown argument " +
+ cmd.params['argument'] +
+ ", or multiple identifiers given")
return
values = self.config_data.get_value_maps(identifier, show_all)
for value_map in values:
@@ -746,13 +782,14 @@ WARNING: Python readline module isn't available, so the command line editor
line += "(default)"
if value_map['modified']:
line += "(modified)"
- print(line)
+ self._print(line)
elif cmd.command == "show_json":
if identifier == "":
- print("Need at least the module to show the configuration in JSON format")
+ self._print("Need at least the module to show the "
+ "configuration in JSON format")
else:
data, default = self.config_data.get_value(identifier)
- print(json.dumps(data))
+ self._print(json.dumps(data))
elif cmd.command == "add":
self.config_data.add_value(identifier,
cmd.params.get('value_or_name'),
@@ -764,7 +801,7 @@ WARNING: Python readline module isn't available, so the command line editor
self.config_data.remove_value(identifier, None)
elif cmd.command == "set":
if 'identifier' not in cmd.params:
- print("Error: missing identifier or value")
+ self._print("Error: missing identifier or value")
else:
parsed_value = None
try:
@@ -781,9 +818,9 @@ WARNING: Python readline module isn't available, so the command line editor
try:
self.config_data.commit()
except isc.config.ModuleCCSessionError as mcse:
- print(str(mcse))
+ self._print(str(mcse))
elif cmd.command == "diff":
- print(self.config_data.get_local_changes())
+ self._print(self.config_data.get_local_changes())
elif cmd.command == "go":
self.go(identifier)
@@ -803,7 +840,7 @@ WARNING: Python readline module isn't available, so the command line editor
# check if exists, if not, revert and error
v,d = self.config_data.get_value(new_location)
if v is None:
- print("Error: " + identifier + " not found")
+ self._print("Error: " + identifier + " not found")
return
self.location = new_location
@@ -818,7 +855,7 @@ WARNING: Python readline module isn't available, so the command line editor
with open(command.params['filename']) as command_file:
commands = command_file.readlines()
except IOError as ioe:
- print("Error: " + str(ioe))
+ self._print("Error: " + str(ioe))
return
elif command_sets.has_command_set(command.command):
commands = command_sets.get_commands(command.command)
@@ -836,7 +873,7 @@ WARNING: Python readline module isn't available, so the command line editor
def __show_execute_commands(self, commands):
'''Prints the command list without executing them'''
for line in commands:
- print(line.strip())
+ self._print(line.strip())
def __apply_execute_commands(self, commands):
'''Applies the configuration commands from the given iterator.
@@ -857,18 +894,19 @@ WARNING: Python readline module isn't available, so the command line editor
for line in commands:
line = line.strip()
if verbose:
- print(line)
+ self._print(line)
if line.startswith('#') or len(line) == 0:
continue
elif line.startswith('!'):
if re.match('^!echo ', line, re.I) and len(line) > 6:
- print(line[6:])
+ self._print(line[6:])
elif re.match('^!verbose\s+on\s*$', line, re.I):
verbose = True
elif re.match('^!verbose\s+off$', line, re.I):
verbose = False
else:
- print("Warning: ignoring unknown directive: " + line)
+ self._print("Warning: ignoring unknown directive: " +
+ line)
else:
cmd = BindCmdParser(line)
self._validate_cmd(cmd)
@@ -879,12 +917,12 @@ WARNING: Python readline module isn't available, so the command line editor
isc.cc.data.DataNotFoundError,
isc.cc.data.DataAlreadyPresentError,
KeyError) as err:
- print('Error: ', err)
- print()
- print('Depending on the contents of the script, and which')
- print('commands it has called, there can be committed and')
- print('local changes. It is advised to check your settings,')
- print('and revert local changes with "config revert".')
+ self._print('Error: ', err)
+ self._print()
+ self._print('Depending on the contents of the script, and which')
+ self._print('commands it has called, there can be committed and')
+            self._print('local changes. It is advised to check your settings,')
+            self._print('and revert local changes with "config revert".')
def apply_cmd(self, cmd):
'''Handles a general module command'''
@@ -898,6 +936,7 @@ WARNING: Python readline module isn't available, so the command line editor
# The reply is a string containing JSON data,
# parse it, then prettyprint
if data != "" and data != "{}":
- print(json.dumps(json.loads(data), sort_keys=True, indent=4))
+ self._print(json.dumps(json.loads(data), sort_keys=True,
+ indent=4))
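
The bindcmd.py changes above route every user-facing message through
self._print() instead of calling print() directly, which is what allows the
unit tests further down to capture output without redirecting sys.stdout.
A minimal sketch of the pattern (illustrative names only, not the actual
bindcmd code):

    # Sketch of the overridable-print pattern; class and method names are
    # illustrative, not copied from bindcmd.py.
    class Interpreter:
        def _print(self, *args):
            # Default behaviour: forward everything to the built-in print().
            print(*args)

        def do_help(self):
            self._print("help text")

    class CapturingInterpreter(Interpreter):
        def __init__(self):
            self.printed_messages = []

        def _print(self, *args):
            # Tests inspect printed_messages instead of scraping stdout.
            self.printed_messages.append(" ".join(map(str, args)))

    tool = CapturingInterpreter()
    tool.do_help()
    assert tool.printed_messages == ["help text"]
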
diff --git a/src/bin/bindctl/bindctl.xml b/src/bin/bindctl/bindctl.xml
index 3993739..b5215f4 100644
--- a/src/bin/bindctl/bindctl.xml
+++ b/src/bin/bindctl/bindctl.xml
@@ -218,7 +218,7 @@
<command>config</command> for Configuration commands.
<!-- TODO: or is config from the cfgmgr module? -->
Additional modules may be available, such as
- <command>Boss</command>, <command>Xfrin</command>, and
+ <command>Init</command>, <command>Xfrin</command>, and
<command>Auth</command>.
</para>
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 546ecc0..875b06e 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -34,7 +34,7 @@ isc.util.process.rename()
# number, and the overall BIND 10 version number (set in configure.ac).
VERSION = "bindctl 20110217 (BIND 10 @PACKAGE_VERSION@)"
-DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Boss/start_auth', 'Recurse/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
+DEFAULT_IDENTIFIER_DESC = "The identifier specifies the config item. Child elements are separated with the '/' character. List indices can be specified with '[i]', where i is an integer specifying the index, starting with 0. Examples: 'Init/start_auth', 'Auth/listen_on[0]/address'. If no identifier is given, shows the item at the current location."
def prepare_config_commands(tool):
'''Prepare fixed commands for local configuration editing'''
diff --git a/src/bin/bindctl/command_sets.py b/src/bin/bindctl/command_sets.py
index c001ec8..b146c38 100644
--- a/src/bin/bindctl/command_sets.py
+++ b/src/bin/bindctl/command_sets.py
@@ -35,21 +35,21 @@ command_sets = {
'commands':
[
'!echo adding Authoritative server component',
- 'config add /Boss/components b10-auth',
- 'config set /Boss/components/b10-auth/kind needed',
- 'config set /Boss/components/b10-auth/special auth',
+ 'config add /Init/components b10-auth',
+ 'config set /Init/components/b10-auth/kind needed',
+ 'config set /Init/components/b10-auth/special auth',
'!echo adding Xfrin component',
- 'config add /Boss/components b10-xfrin',
- 'config set /Boss/components/b10-xfrin/address Xfrin',
- 'config set /Boss/components/b10-xfrin/kind dispensable',
+ 'config add /Init/components b10-xfrin',
+ 'config set /Init/components/b10-xfrin/address Xfrin',
+ 'config set /Init/components/b10-xfrin/kind dispensable',
'!echo adding Xfrout component',
- 'config add /Boss/components b10-xfrout',
- 'config set /Boss/components/b10-xfrout/address Xfrout',
- 'config set /Boss/components/b10-xfrout/kind dispensable',
+ 'config add /Init/components b10-xfrout',
+ 'config set /Init/components/b10-xfrout/address Xfrout',
+ 'config set /Init/components/b10-xfrout/kind dispensable',
'!echo adding Zone Manager component',
- 'config add /Boss/components b10-zonemgr',
- 'config set /Boss/components/b10-zonemgr/address Zonemgr',
- 'config set /Boss/components/b10-zonemgr/kind dispensable',
+ 'config add /Init/components b10-zonemgr',
+ 'config set /Init/components/b10-zonemgr/address Zonemgr',
+ 'config set /Init/components/b10-zonemgr/kind dispensable',
'!echo Components added. Please enter "config commit" to',
'!echo finalize initial setup and run the components.'
]
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index 999d7ee..8a5d00b 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -23,7 +23,7 @@ BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
# Note: lib/dns/python/.libs is necessary because __init__.py of isc package
# automatically imports isc.datasrc, which then requires the DNS loadable
# module. #2145 should eliminate the need for it.
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 5c6aeb2..0ec9b58 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -18,11 +18,14 @@ import unittest
import isc.cc.data
import os
import io
+import errno
import sys
import socket
+import ssl
import http.client
import pwd
import getpass
+import re
from optparse import OptionParser
from isc.config.config_data import ConfigData, MultiConfigData
from isc.config.module_spec import ModuleSpec
@@ -335,6 +338,8 @@ class TestConfigCommands(unittest.TestCase):
self.tool.add_module_info(mod_info)
self.tool.config_data = FakeCCSession()
self.stdout_backup = sys.stdout
+ self.printed_messages = []
+ self.tool._print = self.store_print
def test_precmd(self):
def update_all_modules_info():
@@ -347,6 +352,111 @@ class TestConfigCommands(unittest.TestCase):
precmd('EOF')
self.assertRaises(socket.error, precmd, 'continue')
+ def store_print(self, *args):
+ '''Method to override _print in BindCmdInterpreter.
+           Instead of printing the values, appends them joined as one
+           string to the list in self.printed_messages'''
+ self.printed_messages.append(" ".join(map(str, args)))
+
+ def __check_printed_message(self, expected_message, printed_message):
+ self.assertIsNotNone(re.match(expected_message, printed_message),
+ "Printed message '" + printed_message +
+ "' does not match '" + expected_message + "'")
+
+ def __check_printed_messages(self, expected_messages):
+ '''Helper test function to check the printed messages against a list
+ of regexps'''
+ self.assertEqual(len(expected_messages), len(self.printed_messages))
+ for _ in map(self.__check_printed_message,
+ expected_messages,
+ self.printed_messages):
+ pass
+
+ def test_try_login(self):
+        # Make sure _try_login raises the correct exception
+ # upon failure of either send_POST or the read() on the
+ # response
+
+ orig_send_POST = self.tool.send_POST
+ expected_printed_messages = []
+ try:
+ def send_POST_raiseImmediately(self, params):
+ raise socket.error("test error")
+
+ self.tool.send_POST = send_POST_raiseImmediately
+ self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
+ expected_printed_messages.append(
+ 'Socket error while sending login information: test error')
+ self.__check_printed_messages(expected_printed_messages)
+
+ def create_send_POST_raiseOnRead(exception):
+ '''Create a replacement send_POST() method that raises
+ the given exception when read() is called on the value
+ returned from send_POST()'''
+ def send_POST_raiseOnRead(self, params):
+ class MyResponse:
+ def read(self):
+ raise exception
+ return MyResponse()
+ return send_POST_raiseOnRead
+
+ # basic socket error
+ self.tool.send_POST =\
+ create_send_POST_raiseOnRead(socket.error("read error"))
+ self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
+ expected_printed_messages.append(
+ 'Socket error while sending login information: read error')
+ self.__check_printed_messages(expected_printed_messages)
+
+ # connection reset
+ exc = socket.error("connection reset")
+ exc.errno = errno.ECONNRESET
+ self.tool.send_POST =\
+ create_send_POST_raiseOnRead(exc)
+ self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
+ expected_printed_messages.append(
+ 'Socket error while sending login information: '
+ 'connection reset')
+ expected_printed_messages.append(
+ 'Please check the logs of b10-cmdctl, there may be a '
+ 'problem accepting SSL connections, such as a permission '
+ 'problem on the server certificate file.'
+ )
+ self.__check_printed_messages(expected_printed_messages)
+
+ # 'normal' SSL error
+ exc = ssl.SSLError()
+ self.tool.send_POST =\
+ create_send_POST_raiseOnRead(exc)
+ self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
+ expected_printed_messages.append(
+ 'SSL error while sending login information: .*')
+ self.__check_printed_messages(expected_printed_messages)
+
+ # 'EOF' SSL error
+ exc = ssl.SSLError()
+ exc.errno = ssl.SSL_ERROR_EOF
+ self.tool.send_POST =\
+ create_send_POST_raiseOnRead(exc)
+ self.assertRaises(FailToLogin, self.tool._try_login, "foo", "bar")
+ expected_printed_messages.append(
+ 'SSL error while sending login information: .*')
+ expected_printed_messages.append(
+ 'Please check the logs of b10-cmdctl, there may be a '
+ 'problem accepting SSL connections, such as a permission '
+ 'problem on the server certificate file.'
+ )
+ self.__check_printed_messages(expected_printed_messages)
+
+ # any other exception should be passed through
+ self.tool.send_POST =\
+ create_send_POST_raiseOnRead(ImportError())
+ self.assertRaises(ImportError, self.tool._try_login, "foo", "bar")
+ self.__check_printed_messages(expected_printed_messages)
+
+ finally:
+ self.tool.send_POST = orig_send_POST
+
def test_run(self):
def login_to_cmdctl():
return True
@@ -360,29 +470,22 @@ class TestConfigCommands(unittest.TestCase):
self.tool.conn.sock = FakeSocket()
self.tool.conn.sock.close()
- # validate log message for socket.err
- socket_err_output = io.StringIO()
- sys.stdout = socket_err_output
self.assertEqual(1, self.tool.run())
# First few lines may be some kind of heading, or a warning that
# Python readline is unavailable, so we do a sub-string check.
self.assertIn("Failed to send request, the connection is closed",
- socket_err_output.getvalue())
-
- socket_err_output.close()
+ self.printed_messages)
+ self.assertEqual(1, len(self.printed_messages))
# validate log message for http.client.CannotSendRequest
- cannot_send_output = io.StringIO()
- sys.stdout = cannot_send_output
self.assertEqual(1, self.tool.run())
# First few lines may be some kind of heading, or a warning that
# Python readline is unavailable, so we do a sub-string check.
self.assertIn("Can not send request, the connection is busy",
- cannot_send_output.getvalue())
-
- cannot_send_output.close()
+ self.printed_messages)
+ self.assertEqual(2, len(self.printed_messages))
def test_apply_cfg_command_int(self):
self.tool.location = '/'
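
test_try_login above pins down the expected behaviour of
BindCmdInterpreter._try_login(): socket and SSL errors are reported through
_print() and converted into FailToLogin, a connection reset (ECONNRESET) or
an SSL EOF error additionally triggers the hint about b10-cmdctl's
certificate files, and any other exception propagates unchanged. The
following is only a reconstruction of that contract from the test
expectations, not the actual bindcmd.py body:

    # Assumed sketch, inferred from the test expectations above; the real
    # _try_login() in bindcmd.py may be structured differently.
    import errno
    import socket
    import ssl

    class FailToLogin(Exception):
        pass

    CERT_HINT = ('Please check the logs of b10-cmdctl, there may be a '
                 'problem accepting SSL connections, such as a permission '
                 'problem on the server certificate file.')

    def try_login(send_post, printer, username, password):
        param = {'username': username, 'password': password}
        try:
            response = send_post('/login', param)
            data = response.read().decode()
        except ssl.SSLError as err:
            printer('SSL error while sending login information:', err)
            if err.errno == ssl.SSL_ERROR_EOF:
                printer(CERT_HINT)
            raise FailToLogin()
        except socket.error as err:
            printer('Socket error while sending login information:', err)
            if getattr(err, 'errno', None) == errno.ECONNRESET:
                printer(CERT_HINT)
            raise FailToLogin()
        # Any other exception (e.g. the ImportError used in the test) is not
        # caught here and simply propagates to the caller.
        return response, data
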
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 315e3c5..06b9b0f 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -115,7 +115,7 @@ def main():
cm.read_config()
for ppath in PLUGIN_PATHS:
load_plugins(ppath, cm)
- cm.notify_boss()
+ cm.notify_b10_init()
cm.run()
except SessionError as se:
logger.fatal(CFGMGR_CC_SESSION_ERROR, se)
diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
index 351e8bf..02b48bd 100644
--- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
+++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
@@ -27,7 +27,7 @@ class MyConfigManager:
def __init__(self, path, filename, session=None, rename_config_file=False):
self._path = path
self.read_config_called = False
- self.notify_boss_called = False
+ self.notify_b10_init_called = False
self.run_called = False
self.write_config_called = False
self.rename_config_called = False
@@ -37,8 +37,8 @@ class MyConfigManager:
def read_config(self):
self.read_config_called = True
- def notify_boss(self):
- self.notify_boss_called = True
+ def notify_b10_init(self):
+ self.notify_b10_init_called = True
def run(self):
self.run_called = True
@@ -89,7 +89,7 @@ class TestConfigManagerStartup(unittest.TestCase):
b.load_plugins = orig_load
self.assertTrue(b.cm.read_config_called)
- self.assertTrue(b.cm.notify_boss_called)
+ self.assertTrue(b.cm.notify_b10_init_called)
self.assertTrue(b.cm.run_called)
self.assertTrue(self.loaded_plugins)
# if there are no changes, config is not written
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index bfc13af..ab87dd3 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -11,17 +11,12 @@ pylogmessagedir = $(pyexecdir)/isc/log_messages/
b10_cmdctldir = $(pkgdatadir)
-# NOTE: this will overwrite on install
-# So these generic copies are placed in share/bind10 instead of to etc
-# Admin or packageer will need to put into place manually.
+USERSFILES = cmdctl-accounts.csv
+CERTFILES = cmdctl-keyfile.pem cmdctl-certfile.pem
-CMDCTL_CONFIGURATIONS = cmdctl-accounts.csv
-CMDCTL_CONFIGURATIONS += cmdctl-keyfile.pem cmdctl-certfile.pem
+b10_cmdctl_DATA = cmdctl.spec
-b10_cmdctl_DATA = $(CMDCTL_CONFIGURATIONS)
-b10_cmdctl_DATA += cmdctl.spec
-
-EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
+EXTRA_DIST = $(USERSFILES)
CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
@@ -55,7 +50,7 @@ $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
# this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py $(CERTFILES)
$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
chmod a+x $@
@@ -76,7 +71,7 @@ if INSTALL_CONFIGURATIONS
# because these file will contain sensitive information.
install-data-local:
$(mkinstalldirs) $(DESTDIR)/@sysconfdir@/@PACKAGE@
- for f in $(CMDCTL_CONFIGURATIONS) ; do \
+ for f in $(USERSFILES) $(CERTFILES) ; do \
if test ! -f $(DESTDIR)$(sysconfdir)/@PACKAGE@/$$f; then \
${INSTALL} -m 640 $(srcdir)/$$f $(DESTDIR)$(sysconfdir)/@PACKAGE@/ ; \
fi ; \
diff --git a/src/bin/cmdctl/b10-cmdctl.xml b/src/bin/cmdctl/b10-cmdctl.xml
index 4b1b32f..4d315ac 100644
--- a/src/bin/cmdctl/b10-cmdctl.xml
+++ b/src/bin/cmdctl/b10-cmdctl.xml
@@ -78,7 +78,7 @@
<refsect1>
<title>OPTIONS</title>
-
+
<para>The arguments are as follows:</para>
<variablelist>
@@ -175,7 +175,7 @@
<command>shutdown</command> exits <command>b10-cmdctl</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index 52af54a..15a41ec 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -82,6 +82,18 @@ SPECFILE_LOCATION = SPECFILE_PATH + os.sep + "cmdctl.spec"
class CmdctlException(Exception):
pass
+def check_file(file_name):
+ # TODO: Check contents of certificate file
+ if not os.path.exists(file_name):
+ raise CmdctlException("'%s' does not exist" % file_name)
+
+ if not os.path.isfile(file_name):
+ raise CmdctlException("'%s' is not a file" % file_name)
+
+ if not os.access(file_name, os.R_OK):
+ raise CmdctlException("'%s' is not readable" % file_name)
+
+
class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
'''https connection request handler.
Currently only GET and POST are supported. '''
@@ -153,7 +165,6 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
self.end_headers()
self.wfile.write(json.dumps(reply).encode())
-
def _handle_login(self):
if self._is_user_logged_in():
return http.client.OK, ["user has already login"]
@@ -278,12 +289,14 @@ class CommandControl():
if key == 'version':
continue
elif key in ['key_file', 'cert_file']:
- #TODO, only check whether the file exist,
- # further check need to be done: eg. whether
- # the private/certificate is valid.
+            # TODO: we only check whether the file exists, is a
+            # regular file, and is readable; further checks are still
+            # needed, e.g. whether the private key/certificate is valid.
path = new_config[key]
- if not os.path.exists(path):
- errstr = "the file doesn't exist: " + path
+ try:
+ check_file(path)
+ except CmdctlException as cce:
+ errstr = str(cce)
elif key == 'accounts_file':
errstr = self._accounts_file_check(new_config[key])
else:
@@ -326,7 +339,7 @@ class CommandControl():
self.modules_spec[args[0]] = args[1]
elif command == ccsession.COMMAND_SHUTDOWN:
- #When cmdctl get 'shutdown' command from boss,
+ #When cmdctl get 'shutdown' command from b10-init,
#shutdown the outer httpserver.
self._module_cc.send_stopping()
self._httpserver.shutdown()
@@ -524,27 +537,27 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
self.user_sessions[session_id] = time.time()
def _check_key_and_cert(self, key, cert):
- # TODO, check the content of key/certificate file
- if not os.path.exists(key):
- raise CmdctlException("key file '%s' doesn't exist " % key)
-
- if not os.path.exists(cert):
- raise CmdctlException("certificate file '%s' doesn't exist " % cert)
+ check_file(key)
+        check_file(cert)
def _wrap_socket_in_ssl_context(self, sock, key, cert):
try:
self._check_key_and_cert(key, cert)
ssl_sock = ssl.wrap_socket(sock,
- server_side = True,
- certfile = cert,
- keyfile = key,
- ssl_version = ssl.PROTOCOL_SSLv23)
+ server_side=True,
+ certfile=cert,
+ keyfile=key,
+ ssl_version=ssl.PROTOCOL_SSLv23)
+            # Return here (if control leaves this block it will raise an
+ # error)
return ssl_sock
- except (ssl.SSLError, CmdctlException) as err :
+ except ssl.SSLError as err:
logger.error(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
- self.close_request(sock)
- # raise socket error to finish the request
- raise socket.error
+ except (CmdctlException, IOError) as cce:
+ logger.error(CMDCTL_SSL_SETUP_FAILURE_READING_CERT, cce)
+ self.close_request(sock)
+ # raise socket error to finish the request
+ raise socket.error
def get_request(self):
'''Get client request socket and wrap it in SSL context. '''
@@ -637,4 +650,6 @@ if __name__ == '__main__':
if httpd:
httpd.shutdown()
+ logger.info(CMDCTL_EXITING)
+
sys.exit(result)
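
The check_file() helper added above centralizes the three conditions (the
path exists, is a regular file, and is readable) that both the configuration
handler and _check_key_and_cert() now depend on. A self-contained
illustration of its behaviour (the helper is reproduced here rather than
imported from b10-cmdctl):

    # Standalone illustration of the check_file() semantics added above.
    import os

    class CmdctlException(Exception):
        pass

    def check_file(file_name):
        if not os.path.exists(file_name):
            raise CmdctlException("'%s' does not exist" % file_name)
        if not os.path.isfile(file_name):
            raise CmdctlException("'%s' is not a file" % file_name)
        if not os.access(file_name, os.R_OK):
            raise CmdctlException("'%s' is not readable" % file_name)

    for path in ('/no/such/file', '/', __file__):
        try:
            check_file(path)
            print(path, 'accepted')
        except CmdctlException as err:
            print('rejected:', err)
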
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index a3371b9..32afce3 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -43,6 +43,9 @@ specific error is printed in the message.
This debug message indicates that the given command has been sent to
the given module.
+% CMDCTL_EXITING exiting
+The b10-cmdctl daemon is exiting.
+
% CMDCTL_NO_SUCH_USER username not found in user database: %1
A login attempt was made to b10-cmdctl, but the username was not known.
Users can be added with the tool b10-cmdctl-usermgr.
@@ -58,6 +61,13 @@ with the tool b10-cmdctl-usermgr.
This debug message indicates that the given command is being sent to
the given module.
+% CMDCTL_SSL_SETUP_FAILURE_READING_CERT failed to read certificate or key: %1
+The b10-cmdctl daemon is unable to read either the certificate file or
+the private key file, and is therefore unable to accept any SSL connections.
+The specific error is printed in the message.
+The administrator should solve the issue with the files, or recreate them
+with the b10-certgen tool.
+
% CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1
The user was denied because the SSL connection could not successfully
be set up. The specific error is given in the log message. Possible
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index 6d8f282..74b801a 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -25,7 +25,7 @@ endif
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
- CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
+ CMDCTL_BUILD_PATH=$(abs_top_builddir)/src/bin/cmdctl \
CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
B10_LOCKFILE_DIR_FROM_BUILD=$(abs_top_builddir) \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/cmdctl/tests/cmdctl_test.py b/src/bin/cmdctl/tests/cmdctl_test.py
index 856adf1..ee44f45 100644
--- a/src/bin/cmdctl/tests/cmdctl_test.py
+++ b/src/bin/cmdctl/tests/cmdctl_test.py
@@ -17,17 +17,18 @@
import unittest
import socket
import tempfile
+import stat
import sys
from cmdctl import *
import isc.log
-SPEC_FILE_PATH = '..' + os.sep
-if 'CMDCTL_SPEC_PATH' in os.environ:
- SPEC_FILE_PATH = os.environ['CMDCTL_SPEC_PATH'] + os.sep
+assert 'CMDCTL_SRC_PATH' in os.environ,\
+ "Please run this test with 'make check'"
+SRC_FILE_PATH = os.environ['CMDCTL_SRC_PATH'] + os.sep
-SRC_FILE_PATH = '..' + os.sep
-if 'CMDCTL_SRC_PATH' in os.environ:
- SRC_FILE_PATH = os.environ['CMDCTL_SRC_PATH'] + os.sep
+assert 'CMDCTL_BUILD_PATH' in os.environ,\
+ "Please run this test with 'make check'"
+BUILD_FILE_PATH = os.environ['CMDCTL_BUILD_PATH'] + os.sep
# Rewrite the class for unittest.
class MySecureHTTPRequestHandler(SecureHTTPRequestHandler):
@@ -36,7 +37,7 @@ class MySecureHTTPRequestHandler(SecureHTTPRequestHandler):
def send_response(self, rcode):
self.rcode = rcode
-
+
def end_headers(self):
pass
@@ -51,13 +52,13 @@ class MySecureHTTPRequestHandler(SecureHTTPRequestHandler):
super().do_POST()
self.wfile.close()
os.remove('tmp.file')
-
+
class FakeSecureHTTPServer(SecureHTTPServer):
def __init__(self):
self.user_sessions = {}
self.cmdctl = FakeCommandControlForTestRequestHandler()
- self._verbose = True
+ self._verbose = True
self._user_infos = {}
self.idle_timeout = 1200
self._lock = threading.Lock()
@@ -71,6 +72,17 @@ class FakeCommandControlForTestRequestHandler(CommandControl):
def send_command(self, mod, cmd, param):
return 0, {}
+# context to temporarily make a file unreadable
+class UnreadableFile:
+ def __init__(self, file_name):
+ self.file_name = file_name
+ self.orig_mode = os.stat(file_name).st_mode
+
+ def __enter__(self):
+ os.chmod(self.file_name, self.orig_mode & ~stat.S_IRUSR)
+
+ def __exit__(self, type, value, traceback):
+ os.chmod(self.file_name, self.orig_mode)
class TestSecureHTTPRequestHandler(unittest.TestCase):
def setUp(self):
@@ -97,7 +109,7 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
self.handler.path = '/abc'
mod, cmd = self.handler._parse_request_path()
self.assertTrue((mod == 'abc') and (cmd == None))
-
+
self.handler.path = '/abc/edf'
mod, cmd = self.handler._parse_request_path()
self.assertTrue((mod == 'abc') and (cmd == 'edf'))
@@ -125,20 +137,20 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
def test_do_GET(self):
self.handler.do_GET()
- self.assertEqual(self.handler.rcode, http.client.BAD_REQUEST)
-
+ self.assertEqual(self.handler.rcode, http.client.BAD_REQUEST)
+
def test_do_GET_1(self):
self.handler.headers['cookie'] = 12345
self.handler.do_GET()
- self.assertEqual(self.handler.rcode, http.client.UNAUTHORIZED)
+ self.assertEqual(self.handler.rcode, http.client.UNAUTHORIZED)
def test_do_GET_2(self):
self.handler.headers['cookie'] = 12345
self.handler.server.user_sessions[12345] = time.time() + 1000000
self.handler.path = '/how/are'
self.handler.do_GET()
- self.assertEqual(self.handler.rcode, http.client.NO_CONTENT)
-
+ self.assertEqual(self.handler.rcode, http.client.NO_CONTENT)
+
def test_do_GET_3(self):
self.handler.headers['cookie'] = 12346
self.handler.server.user_sessions[12346] = time.time() + 1000000
@@ -146,8 +158,8 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
for path in path_vec:
self.handler.path = '/' + path
self.handler.do_GET()
- self.assertEqual(self.handler.rcode, http.client.OK)
-
+ self.assertEqual(self.handler.rcode, http.client.OK)
+
def test_user_logged_in(self):
self.handler.server.user_sessions = {}
self.handler.session_id = 12345
@@ -243,8 +255,8 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
self.assertEqual(http.client.BAD_REQUEST, rcode)
def _gen_module_spec(self):
- spec = { 'commands': [
- { 'command_name' :'command',
+ spec = { 'commands': [
+ { 'command_name' :'command',
'command_args': [ {
'item_name' : 'param1',
'item_type' : 'integer',
@@ -253,9 +265,9 @@ class TestSecureHTTPRequestHandler(unittest.TestCase):
} ],
'command_description' : 'cmd description'
}
- ]
+ ]
}
-
+
return spec
def test_handle_post_request_2(self):
@@ -290,13 +302,13 @@ class MyCommandControl(CommandControl):
return {}
def _setup_session(self):
- spec_file = SPEC_FILE_PATH + 'cmdctl.spec'
+ spec_file = BUILD_FILE_PATH + 'cmdctl.spec'
module_spec = isc.config.module_spec_from_file(spec_file)
config = isc.config.config_data.ConfigData(module_spec)
self._module_name = 'Cmdctl'
self._cmdctl_config_data = config.get_full_config()
- def _handle_msg_from_msgq(self):
+ def _handle_msg_from_msgq(self):
pass
class TestCommandControl(unittest.TestCase):
@@ -305,7 +317,7 @@ class TestCommandControl(unittest.TestCase):
self.old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
self.cmdctl = MyCommandControl(None, True)
-
+
def tearDown(self):
sys.stdout.close()
sys.stdout = self.old_stdout
@@ -320,7 +332,7 @@ class TestCommandControl(unittest.TestCase):
old_env = os.environ
if 'B10_FROM_SOURCE' in os.environ:
del os.environ['B10_FROM_SOURCE']
- self.cmdctl.get_cmdctl_config_data()
+ self.cmdctl.get_cmdctl_config_data()
self._check_config(self.cmdctl)
os.environ = old_env
@@ -328,7 +340,7 @@ class TestCommandControl(unittest.TestCase):
os.environ['B10_FROM_SOURCE'] = '../'
self._check_config(self.cmdctl)
os.environ = old_env
-
+
def test_parse_command_result(self):
self.assertEqual({}, self.cmdctl._parse_command_result(1, {'error' : 1}))
self.assertEqual({'a': 1}, self.cmdctl._parse_command_result(0, {'a' : 1}))
@@ -391,13 +403,13 @@ class TestCommandControl(unittest.TestCase):
os.environ = old_env
answer = self.cmdctl.config_handler({'key_file': '/user/non-exist_folder'})
- self._check_answer(answer, 1, "the file doesn't exist: /user/non-exist_folder")
+ self._check_answer(answer, 1, "'/user/non-exist_folder' does not exist")
answer = self.cmdctl.config_handler({'cert_file': '/user/non-exist_folder'})
- self._check_answer(answer, 1, "the file doesn't exist: /user/non-exist_folder")
+ self._check_answer(answer, 1, "'/user/non-exist_folder' does not exist")
answer = self.cmdctl.config_handler({'accounts_file': '/user/non-exist_folder'})
- self._check_answer(answer, 1,
+ self._check_answer(answer, 1,
"Invalid accounts file: [Errno 2] No such file or directory: '/user/non-exist_folder'")
# Test with invalid accounts file
@@ -409,7 +421,7 @@ class TestCommandControl(unittest.TestCase):
answer = self.cmdctl.config_handler({'accounts_file': file_name})
self._check_answer(answer, 1, "Invalid accounts file: list index out of range")
os.remove(file_name)
-
+
def test_send_command(self):
rcode, value = self.cmdctl.send_command('Cmdctl', 'print_settings', None)
self.assertEqual(rcode, 0)
@@ -424,7 +436,7 @@ class TestSecureHTTPServer(unittest.TestCase):
self.old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = sys.stdout
- self.server = MySecureHTTPServer(('localhost', 8080),
+ self.server = MySecureHTTPServer(('localhost', 8080),
MySecureHTTPRequestHandler,
MyCommandControl, verbose=True)
@@ -458,32 +470,90 @@ class TestSecureHTTPServer(unittest.TestCase):
self.assertEqual(1, len(self.server._user_infos))
self.assertTrue('root' in self.server._user_infos)
+ def test_check_file(self):
+ # Just some file that we know exists
+ file_name = BUILD_FILE_PATH + 'cmdctl-keyfile.pem'
+ check_file(file_name)
+ with UnreadableFile(file_name):
+ self.assertRaises(CmdctlException, check_file, file_name)
+ self.assertRaises(CmdctlException, check_file, '/local/not-exist')
+ self.assertRaises(CmdctlException, check_file, '/')
+
+
def test_check_key_and_cert(self):
+ keyfile = BUILD_FILE_PATH + 'cmdctl-keyfile.pem'
+ certfile = BUILD_FILE_PATH + 'cmdctl-certfile.pem'
+
+ # no exists
+ self.assertRaises(CmdctlException, self.server._check_key_and_cert,
+ keyfile, '/local/not-exist')
+ self.assertRaises(CmdctlException, self.server._check_key_and_cert,
+ '/local/not-exist', certfile)
+
+ # not a file
+ self.assertRaises(CmdctlException, self.server._check_key_and_cert,
+ keyfile, '/')
self.assertRaises(CmdctlException, self.server._check_key_and_cert,
- '/local/not-exist', 'cmdctl-keyfile.pem')
+ '/', certfile)
- self.server._check_key_and_cert(SRC_FILE_PATH + 'cmdctl-keyfile.pem',
- SRC_FILE_PATH + 'cmdctl-certfile.pem')
+ # no read permission
+ with UnreadableFile(certfile):
+ self.assertRaises(CmdctlException,
+ self.server._check_key_and_cert,
+ keyfile, certfile)
+
+ with UnreadableFile(keyfile):
+ self.assertRaises(CmdctlException,
+ self.server._check_key_and_cert,
+ keyfile, certfile)
+
+ # All OK (also happens to check the context code above works)
+ self.server._check_key_and_cert(keyfile, certfile)
def test_wrap_sock_in_ssl_context(self):
sock = socket.socket()
- self.assertRaises(socket.error,
+
+ # Bad files should result in a socket.error raised by our own
+ # code in the basic file checks
+ self.assertRaises(socket.error,
self.server._wrap_socket_in_ssl_context,
- sock,
- '../cmdctl-keyfile',
- '../cmdctl-certfile')
+ sock,
+ 'no_such_file', 'no_such_file')
+ # Using a non-certificate file would cause an SSLError, which
+ # is caught by our code which then raises a basic socket.error
+ self.assertRaises(socket.error,
+ self.server._wrap_socket_in_ssl_context,
+ sock,
+ BUILD_FILE_PATH + 'cmdctl.py',
+ BUILD_FILE_PATH + 'cmdctl-certfile.pem')
+
+ # Should succeed
sock1 = socket.socket()
- self.server._wrap_socket_in_ssl_context(sock1,
- SRC_FILE_PATH + 'cmdctl-keyfile.pem',
- SRC_FILE_PATH + 'cmdctl-certfile.pem')
+ ssl_sock = self.server._wrap_socket_in_ssl_context(sock1,
+ BUILD_FILE_PATH + 'cmdctl-keyfile.pem',
+ BUILD_FILE_PATH + 'cmdctl-certfile.pem')
+ self.assertTrue(isinstance(ssl_sock, ssl.SSLSocket))
+
+ # wrap_socket can also raise IOError, which should be caught and
+ # handled like the other errors.
+ # Force this by temporarily disabling our own file checks
+ orig_check_func = self.server._check_key_and_cert
+ try:
+ self.server._check_key_and_cert = lambda x,y: None
+ self.assertRaises(socket.error,
+ self.server._wrap_socket_in_ssl_context,
+ sock,
+ 'no_such_file', 'no_such_file')
+ finally:
+ self.server._check_key_and_cert = orig_check_func
class TestFuncNotInClass(unittest.TestCase):
def test_check_port(self):
- self.assertRaises(OptionValueError, check_port, None, 'port', -1, None)
- self.assertRaises(OptionValueError, check_port, None, 'port', 65536, None)
- self.assertRaises(OptionValueError, check_addr, None, 'ipstr', 'a.b.d', None)
- self.assertRaises(OptionValueError, check_addr, None, 'ipstr', '1::0:a.b', None)
+ self.assertRaises(OptionValueError, check_port, None, 'port', -1, None)
+ self.assertRaises(OptionValueError, check_port, None, 'port', 65536, None)
+ self.assertRaises(OptionValueError, check_addr, None, 'ipstr', 'a.b.d', None)
+ self.assertRaises(OptionValueError, check_addr, None, 'ipstr', '1::0:a.b', None)
if __name__== "__main__":
diff --git a/src/bin/dbutil/b10-dbutil.xml b/src/bin/dbutil/b10-dbutil.xml
index 752b8a8..c93d060 100644
--- a/src/bin/dbutil/b10-dbutil.xml
+++ b/src/bin/dbutil/b10-dbutil.xml
@@ -93,7 +93,7 @@
file. It has the same name, with ".backup" appended to it. If a
file of that name already exists, the file will have the suffix
".backup-1". If that exists, the file will be suffixed ".backup-2",
- and so on). Exit status is 0 if the upgrade is either succesful or
+ and so on). Exit status is 0 if the upgrade is either successful or
aborted by the user, and non-zero if there is an error.
</para>
diff --git a/src/bin/dbutil/dbutil.py.in b/src/bin/dbutil/dbutil.py.in
index a844484..7a1469c 100755
--- a/src/bin/dbutil/dbutil.py.in
+++ b/src/bin/dbutil/dbutil.py.in
@@ -200,10 +200,16 @@ UPGRADES = [
"CREATE INDEX nsec3_byhash_and_rdtype ON nsec3 " +
"(hash, rdtype)"
]
+ },
+
+ {'from': (2, 1), 'to': (2, 2),
+ 'statements': [
+ "CREATE INDEX records_byrname_and_rdtype ON records (rname, rdtype)"
+ ]
}
# To extend this, leave the above statements in place and add another
-# dictionary to the list. The "from" version should be (2, 1), the "to"
+# dictionary to the list. The "from" version should be (2, 2), the "to"
# version whatever the version the update is to, and the SQL statements are
# the statements required to perform the upgrade. This way, the upgrade
# program will be able to upgrade both a V1.0 and a V2.0 database.
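
The comment above spells out how the table is meant to grow: leave the
existing entries in place and append a new dictionary whose "from" version
matches the current latest schema version. A hypothetical next entry would
have the following shape (the target version and SQL statement are
placeholders, not a planned schema change):

    # Placeholder example only: the shape of a future UPGRADES entry, with an
    # invented index name and target version.
    HYPOTHETICAL_NEXT_UPGRADE = {
        'from': (2, 2), 'to': (2, 3),
        'statements': [
            "CREATE INDEX records_by_example ON records (rdtype)"
        ]
    }
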
diff --git a/src/bin/dbutil/tests/dbutil_test.sh.in b/src/bin/dbutil/tests/dbutil_test.sh.in
index d60f186..4bc9f85 100755
--- a/src/bin/dbutil/tests/dbutil_test.sh.in
+++ b/src/bin/dbutil/tests/dbutil_test.sh.in
@@ -165,7 +165,7 @@ upgrade_ok_test() {
if [ $? -eq 0 ]
then
# Compare schema with the reference
- get_schema $testdata/v2_1.sqlite3
+ get_schema $testdata/v2_2.sqlite3
expected_schema=$db_schema
get_schema $tempfile
actual_schema=$db_schema
@@ -177,7 +177,7 @@ upgrade_ok_test() {
fi
# Check the version is set correctly
- check_version $tempfile "V2.1"
+ check_version $tempfile "V2.2"
# Check that a backup was made
check_backup $1 $2
@@ -303,26 +303,32 @@ check_version_fail() {
rm -f $tempfile $backupfile
-# Test 1 - check that the utility fails if the database does not exist
-echo "1.1. Non-existent database - check"
+# This is the section number that is echoed during tests. It is
+# incremented when each section is run.
+sec=0
+
+# Test: check that the utility fails if the database does not exist
+sec=`expr $sec + 1`
+echo $sec".1. Non-existent database - check"
${SHELL} ../run_dbutil.sh --check $tempfile
failzero $?
check_no_backup $tempfile $backupfile
-echo "1.2. Non-existent database - upgrade"
+echo $sec".2. Non-existent database - upgrade"
${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
failzero $?
check_no_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-# Test 2 - should fail to check an empty file and fail to upgrade it
-echo "2.1. Database is an empty file - check"
+# Test: should fail to check an empty file and fail to upgrade it
+sec=`expr $sec + 1`
+echo $sec".1. Database is an empty file - check"
touch $tempfile
check_version_fail $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "2.2. Database is an empty file - upgrade"
+echo $sec".2. Database is an empty file - upgrade"
touch $tempfile
${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
failzero $?
@@ -330,13 +336,13 @@ failzero $?
check_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-
-echo "3.1. Database is not an SQLite file - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is not an SQLite file - check"
echo "This is not an sqlite3 database" > $tempfile
check_version_fail $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "3.2. Database is not an SQLite file - upgrade"
+echo $sec".2. Database is not an SQLite file - upgrade"
echo "This is not an sqlite3 database" > $tempfile
${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile
failzero $?
@@ -345,81 +351,113 @@ check_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "4.1. Database is an SQLite3 file without the schema table - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is an SQLite3 file without the schema table - check"
check_version_fail $testdata/no_schema.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "4.1. Database is an SQLite3 file without the schema table - upgrade"
+echo $sec".1. Database is an SQLite3 file without the schema table - upgrade"
upgrade_fail_test $testdata/no_schema.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "5.1. Database is an old V1 database - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is an old V1 database - check"
check_version $testdata/old_v1.sqlite3 "V1.0"
check_no_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "5.2. Database is an old V1 database - upgrade"
+echo $sec".2. Database is an old V1 database - upgrade"
upgrade_ok_test $testdata/old_v1.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "6.1. Database is new V1 database - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is new V1 database - check"
check_version $testdata/new_v1.sqlite3 "V1.0"
check_no_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "6.2. Database is a new V1 database - upgrade"
+echo $sec".2. Database is a new V1 database - upgrade"
upgrade_ok_test $testdata/new_v1.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "7.1. Database is V2.0 database - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is V2.0 database - check"
check_version $testdata/v2_0.sqlite3 "V2.0"
check_no_backup $tempfile $backupfile
rm -f $tempfile $backupfile
-echo "7.2. Database is a V2.0 database - upgrade"
+echo $sec".2. Database is a V2.0 database - upgrade"
upgrade_ok_test $testdata/v2_0.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "8.1. Database is V2.0 database with empty schema table - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is V2.1 database - check"
+check_version $testdata/v2_1.sqlite3 "V2.1"
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo $sec".2. Database is a V2.1 database - upgrade"
+upgrade_ok_test $testdata/v2_1.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+sec=`expr $sec + 1`
+echo $sec".1. Database is V2.2 database - check"
+check_version $testdata/v2_2.sqlite3 "V2.2"
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo $sec".2. Database is a V2.2 database - upgrade"
+upgrade_ok_test $testdata/v2_2.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+sec=`expr $sec + 1`
+echo $sec".1. Database is V2.0 database with empty schema table - check"
check_version_fail $testdata/empty_version.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "8.2. Database is V2.0 database with empty schema table - upgrade"
+echo $sec".2. Database is V2.0 database with empty schema table - upgrade"
upgrade_fail_test $testdata/empty_version.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "9.1. Database is V2.0 database with over-full schema table - check"
+sec=`expr $sec + 1`
+echo $sec".1. Database is V2.0 database with over-full schema table - check"
check_version_fail $testdata/too_many_version.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "9.2. Database is V2.0 database with over-full schema table - upgrade"
+echo $sec".2. Database is V2.0 database with over-full schema table - upgrade"
upgrade_fail_test $testdata/too_many_version.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "10.0. Upgrade corrupt database"
+sec=`expr $sec + 1`
+echo $sec". Upgrade corrupt database"
upgrade_fail_test $testdata/corrupt.sqlite3 $backupfile
rm -f $tempfile $backupfile
-echo "11. Record count test"
+sec=`expr $sec + 1`
+echo $sec". Record count test"
record_count_test $testdata/new_v1.sqlite3
rm -f $tempfile $backupfile
-echo "12. Backup file already exists"
+sec=`expr $sec + 1`
+echo $sec". Backup file already exists"
touch $backupfile
touch ${backupfile}-1
upgrade_ok_test $testdata/v2_0.sqlite3 ${backupfile}-2
rm -f $tempfile $backupfile ${backupfile}-1 ${backupfile}-2
-echo "13.1 Command-line errors"
+sec=`expr $sec + 1`
+echo $sec".1 Command-line errors"
copy_file $testdata/old_v1.sqlite3 $tempfile
${SHELL} ../run_dbutil.sh $tempfile
failzero $?
@@ -437,22 +475,22 @@ ${SHELL} ../run_dbutil.sh --upgrade --noconfirm $tempfile $backupfile
failzero $?
rm -f $tempfile $backupfile
-echo "13.2 verbose flag"
+echo $sec".2 verbose flag"
copy_file $testdata/old_v1.sqlite3 $tempfile
${SHELL} ../run_dbutil.sh --upgrade --noconfirm --verbose $tempfile
passzero $?
rm -f $tempfile $backupfile
-echo "13.3 Interactive prompt - yes"
+echo $sec".3 Interactive prompt - yes"
copy_file $testdata/old_v1.sqlite3 $tempfile
${SHELL} ../run_dbutil.sh --upgrade $tempfile << .
Yes
.
passzero $?
-check_version $tempfile "V2.1"
+check_version $tempfile "V2.2"
rm -f $tempfile $backupfile
-echo "13.4 Interactive prompt - no"
+echo $sec".4 Interactive prompt - no"
copy_file $testdata/old_v1.sqlite3 $tempfile
${SHELL} ../run_dbutil.sh --upgrade $tempfile << .
no
@@ -462,7 +500,7 @@ diff $testdata/old_v1.sqlite3 $tempfile > /dev/null
passzero $?
rm -f $tempfile $backupfile
-echo "13.5 quiet flag"
+echo $sec".5 quiet flag"
copy_file $testdata/old_v1.sqlite3 $tempfile
${SHELL} ../run_dbutil.sh --check --quiet $tempfile 2>&1 | grep .
failzero $?
diff --git a/src/bin/dbutil/tests/testdata/Makefile.am b/src/bin/dbutil/tests/testdata/Makefile.am
index f4873f4..23149b9 100644
--- a/src/bin/dbutil/tests/testdata/Makefile.am
+++ b/src/bin/dbutil/tests/testdata/Makefile.am
@@ -11,3 +11,4 @@ EXTRA_DIST += README
EXTRA_DIST += too_many_version.sqlite3
EXTRA_DIST += v2_0.sqlite3
EXTRA_DIST += v2_1.sqlite3
+EXTRA_DIST += v2_2.sqlite3
diff --git a/src/bin/dbutil/tests/testdata/v2_2.sqlite3 b/src/bin/dbutil/tests/testdata/v2_2.sqlite3
new file mode 100644
index 0000000..b8d6369
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/v2_2.sqlite3 differ
diff --git a/src/bin/ddns/b10-ddns.xml b/src/bin/ddns/b10-ddns.xml
index fb895b9..7935482 100644
--- a/src/bin/ddns/b10-ddns.xml
+++ b/src/bin/ddns/b10-ddns.xml
@@ -56,8 +56,8 @@
<para>The <command>b10-ddns</command> daemon provides the BIND 10
Dynamic Update (DDNS) service, as specified in RFC 2136.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -119,7 +119,7 @@
<listitem>
<para>
This value is ignored at this moment, but is provided for
- compatibility with the <command>bind10</command> Boss process.
+ compatibility with the <command>b10-init</command> process.
</para>
</listitem>
</varlistentry>
@@ -154,7 +154,7 @@
<command>shutdown</command> exits <command>b10-ddns</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/ddns/ddns.py.in b/src/bin/ddns/ddns.py.in
index 094e0ec..d7fcab7 100755
--- a/src/bin/ddns/ddns.py.in
+++ b/src/bin/ddns/ddns.py.in
@@ -134,7 +134,7 @@ def get_datasrc_client(cc_session):
function will simply be removed.
'''
- HARDCODED_DATASRC_CLASS = RRClass.IN()
+ HARDCODED_DATASRC_CLASS = RRClass.IN
file, is_default = cc_session.get_remote_config_value("Auth",
"database_file")
# See xfrout.py:get_db_file() for this trick:
@@ -469,7 +469,7 @@ class DDNSServer:
self.__request_msg.clear(Message.PARSE)
# specify PRESERVE_ORDER as we need to handle each RR separately.
self.__request_msg.from_wire(req_data, Message.PRESERVE_ORDER)
- if self.__request_msg.get_opcode() != Opcode.UPDATE():
+ if self.__request_msg.get_opcode() != Opcode.UPDATE:
raise self.InternalError('Update request has unexpected '
'opcode: ' +
str(self.__request_msg.get_opcode()))
@@ -536,7 +536,7 @@ class DDNSServer:
else:
tcp_ctx.close()
except socket.error as ex:
- logger.warn(DDNS_RESPONSE_SOCKET_ERROR, ClientFormatter(dest), ex)
+ logger.warn(DDNS_RESPONSE_SOCKET_SEND_FAILED, ClientFormatter(dest), ex)
return False
return True
@@ -683,7 +683,7 @@ class DDNSServer:
result = ctx[0].send_ready()
if result != DNSTCPContext.SENDING:
if result == DNSTCPContext.CLOSED:
- logger.warn(DDNS_RESPONSE_TCP_SOCKET_ERROR,
+ logger.warn(DDNS_RESPONSE_TCP_SOCKET_SEND_FAILED,
ClientFormatter(ctx[1]))
ctx[0].close()
del self._tcp_ctxs[fileno]
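
The ddns.py.in changes above (and the matching test updates further down)
track an API change in the isc.dns Python bindings: RRClass.IN, RRType.SOA,
Opcode.UPDATE, Rcode.NOERROR and similar values are now class-level constants
rather than factory methods, so the trailing parentheses disappear at every
call site. For example (assumes the BIND 10 Python bindings are importable):

    # Before this branch:  TEST_RRCLASS = RRClass.IN(); rcode = Rcode.NOERROR()
    # After this branch:
    from isc.dns import Opcode, Rcode, RRClass, RRType

    TEST_RRCLASS = RRClass.IN
    UPDATE_RRTYPE = RRType.SOA
    UPDATE_OPCODE = Opcode.UPDATE
    DEFAULT_RCODE = Rcode.NOERROR
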
diff --git a/src/bin/ddns/ddns_messages.mes b/src/bin/ddns/ddns_messages.mes
index d128361..cdc7b4d 100644
--- a/src/bin/ddns/ddns_messages.mes
+++ b/src/bin/ddns/ddns_messages.mes
@@ -134,12 +134,12 @@ appropriate ACL configuration or some lower layer filtering. The
number of existing TCP clients are shown in the log, which should be
identical to the current quota.
-% DDNS_RESPONSE_SOCKET_ERROR failed to send update response to %1: %2
+% DDNS_RESPONSE_SOCKET_SEND_FAILED failed to send update response to %1: %2
Network I/O error happens in sending an update response. The
client's address that caused the error and error details are also
logged.
-% DDNS_RESPONSE_TCP_SOCKET_ERROR failed to complete sending update response to %1 over TCP
+% DDNS_RESPONSE_TCP_SOCKET_SEND_FAILED failed to complete sending update response to %1 over TCP
b10-ddns had tried to send an update response over TCP, and it hadn't
been completed at that time, and a followup attempt to complete the
send operation failed due to some network I/O error. While a network
diff --git a/src/bin/ddns/tests/ddns_test.py b/src/bin/ddns/tests/ddns_test.py
index 0f5ca9b..d366f09 100755
--- a/src/bin/ddns/tests/ddns_test.py
+++ b/src/bin/ddns/tests/ddns_test.py
@@ -39,9 +39,9 @@ TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
READ_ZONE_DB_FILE = TESTDATA_PATH + "rwtest.sqlite3" # original, to be copied
TEST_ZONE_NAME = Name('example.org')
TEST_ZONE_NAME_STR = TEST_ZONE_NAME.to_text()
-UPDATE_RRTYPE = RRType.SOA()
+UPDATE_RRTYPE = RRType.SOA
TEST_QID = 5353 # arbitrary chosen
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_RRCLASS_STR = TEST_RRCLASS.to_text()
TEST_SERVER6 = ('2001:db8::53', 53, 0, 0)
TEST_CLIENT6 = ('2001:db8::1', 53000, 0, 0)
@@ -169,9 +169,9 @@ class FakeUpdateSession:
self.__msg.make_response()
self.__msg.clear_section(SECTION_ZONE)
if self.__faked_result == UPDATE_SUCCESS:
- self.__msg.set_rcode(Rcode.NOERROR())
+ self.__msg.set_rcode(Rcode.NOERROR)
else:
- self.__msg.set_rcode(Rcode.REFUSED())
+ self.__msg.set_rcode(Rcode.REFUSED)
return self.__msg
class FakeKeyringModule:
@@ -478,7 +478,7 @@ class TestDDNSServer(unittest.TestCase):
# By default (in our faked config) it should be derived from the
# test data source
rrclass, datasrc_client = self.ddns_server._datasrc_info
- self.assertEqual(RRClass.IN(), rrclass)
+ self.assertEqual(RRClass.IN, rrclass)
self.assertEqual(DataSourceClient.SUCCESS,
datasrc_client.find_zone(Name('example.org'))[0])
@@ -491,7 +491,7 @@ class TestDDNSServer(unittest.TestCase):
{'database_file': './notexistentdir/somedb.sqlite3'}
self.__cc_session.add_remote_config_by_name('Auth')
rrclass, datasrc_client = self.ddns_server._datasrc_info
- self.assertEqual(RRClass.IN(), rrclass)
+ self.assertEqual(RRClass.IN, rrclass)
self.assertRaises(isc.datasrc.Error,
datasrc_client.find_zone, Name('example.org'))
@@ -887,12 +887,12 @@ class TestDDNSServer(unittest.TestCase):
self.__select_answer = ([], [10], [])
self.assertRaises(KeyError, self.ddns_server.run)
-def create_msg(opcode=Opcode.UPDATE(), zones=[TEST_ZONE_RECORD], prereq=[],
+def create_msg(opcode=Opcode.UPDATE, zones=[TEST_ZONE_RECORD], prereq=[],
tsigctx=None):
msg = Message(Message.RENDER)
msg.set_qid(TEST_QID)
msg.set_opcode(opcode)
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_rcode(Rcode.NOERROR)
for z in zones:
msg.add_question(z)
for p in prereq:
@@ -936,7 +936,7 @@ class TestDDNSSession(unittest.TestCase):
return FakeUpdateSession(req_message, client_addr, zone_config,
self.__faked_result)
- def check_update_response(self, resp_wire, expected_rcode=Rcode.NOERROR(),
+ def check_update_response(self, resp_wire, expected_rcode=Rcode.NOERROR,
tsig_ctx=None, tcp=False):
'''Check if given wire data are valid form of update response.
@@ -963,7 +963,7 @@ class TestDDNSSession(unittest.TestCase):
self.assertNotEqual(None, tsig_record)
self.assertEqual(TSIGError.NOERROR,
tsig_ctx.verify(tsig_record, resp_wire))
- self.assertEqual(Opcode.UPDATE(), msg.get_opcode())
+ self.assertEqual(Opcode.UPDATE, msg.get_opcode())
self.assertEqual(expected_rcode, msg.get_rcode())
self.assertEqual(TEST_QID, msg.get_qid())
for section in [SECTION_ZONE, SECTION_PREREQUISITE, SECTION_UPDATE]:
@@ -977,7 +977,7 @@ class TestDDNSSession(unittest.TestCase):
server_addr = TEST_SERVER6 if ipv6 else TEST_SERVER4
client_addr = TEST_CLIENT6 if ipv6 else TEST_CLIENT4
tsig = TSIGContext(tsig_key) if tsig_key is not None else None
- rcode = Rcode.NOERROR() if result == UPDATE_SUCCESS else Rcode.REFUSED()
+ rcode = Rcode.NOERROR if result == UPDATE_SUCCESS else Rcode.REFUSED
has_response = (result != UPDATE_DROP)
self.assertEqual(has_response,
@@ -1015,7 +1015,7 @@ class TestDDNSSession(unittest.TestCase):
# Opcode is not UPDATE
self.assertFalse(self.server.handle_request(
- (self.__sock, None, None, create_msg(opcode=Opcode.QUERY()))))
+ (self.__sock, None, None, create_msg(opcode=Opcode.QUERY))))
self.assertEqual((None, None), (s._sent_data, s._sent_addr))
# TSIG verification error. We use UPDATE_DROP to signal check_session
@@ -1031,7 +1031,7 @@ class TestDDNSSession(unittest.TestCase):
TEST_CLIENT6,
create_msg())))
# this check ensures sendto() was really attempted.
- self.check_update_response(self.__sock._sent_data, Rcode.NOERROR())
+ self.check_update_response(self.__sock._sent_data, Rcode.NOERROR)
def test_tcp_request(self):
# A simple case using TCP: all resopnse data are sent out at once.
@@ -1040,7 +1040,7 @@ class TestDDNSSession(unittest.TestCase):
self.assertTrue(self.server.handle_request((s, TEST_SERVER6,
TEST_CLIENT6,
create_msg())))
- self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+ self.check_update_response(s._sent_data, Rcode.NOERROR, tcp=True)
# In the current implementation, the socket should be closed
# immedidately after a successful send.
self.assertEqual(1, s._close_called)
@@ -1071,7 +1071,7 @@ class TestDDNSSession(unittest.TestCase):
s.make_send_ready()
self.assertEqual(DNSTCPContext.SEND_DONE,
self.server._tcp_ctxs[s.fileno()][0].send_ready())
- self.check_update_response(s._sent_data, Rcode.NOERROR(), tcp=True)
+ self.check_update_response(s._sent_data, Rcode.NOERROR, tcp=True)
def test_tcp_request_error(self):
# initial send() on the TCP socket will fail. The request handling
@@ -1127,9 +1127,9 @@ class TestDDNSSession(unittest.TestCase):
self.__faked_result = UPDATE_DROP
# Put the same RR twice in the prerequisite section. We should see
# them as separate RRs.
- dummy_record = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ dummy_record = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS,
RRTTL(0))
- dummy_record.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, "ns.example"))
+ dummy_record.add_rdata(Rdata(RRType.NS, TEST_RRCLASS, "ns.example."))
self.server.handle_request((self.__sock, TEST_SERVER6, TEST_CLIENT6,
create_msg(prereq=[dummy_record,
dummy_record])))
diff --git a/src/bin/dhcp4/Makefile.am b/src/bin/dhcp4/Makefile.am
index 28f08f7..b3818c7 100644
--- a/src/bin/dhcp4/Makefile.am
+++ b/src/bin/dhcp4/Makefile.am
@@ -5,6 +5,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/bin -I$(top_builddir)/src/bin
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some Boost headers when compiling with clang
+AM_CXXFLAGS += -Wno-unused-parameter
+endif
if USE_STATIC_LINK
AM_LDFLAGS = -static
@@ -51,12 +55,6 @@ b10_dhcp4_SOURCES += dhcp4_srv.cc dhcp4_srv.h
nodist_b10_dhcp4_SOURCES = dhcp4_messages.h dhcp4_messages.cc
EXTRA_DIST += dhcp4_messages.mes
-if USE_CLANGPP
-# Disable unused parameter warning caused by some of the
-# Boost headers when compiling with clang.
-b10_dhcp4_CXXFLAGS = -Wno-unused-parameter
-endif
-
b10_dhcp4_LDADD = $(top_builddir)/src/lib/dhcp/libb10-dhcp++.la
b10_dhcp4_LDADD += $(top_builddir)/src/lib/util/libb10-util.la
b10_dhcp4_LDADD += $(top_builddir)/src/lib/dhcpsrv/libb10-dhcpsrv.la
diff --git a/src/bin/dhcp4/config_parser.cc b/src/bin/dhcp4/config_parser.cc
index 67a7ec6..d8a586b 100644
--- a/src/bin/dhcp4/config_parser.cc
+++ b/src/bin/dhcp4/config_parser.cc
@@ -687,7 +687,7 @@ public:
virtual void commit() {
if (options_ == NULL) {
isc_throw(isc::InvalidOperation, "parser logic error: storage must be set before "
- "commiting option data.");
+ "committing option data.");
} else if (!option_descriptor_.option) {
// Before we can commit the new option should be configured. If it is not
// than somebody must have called commit() before build().
@@ -743,30 +743,34 @@ private:
void createOption() {
// Option code is held in the uint32_t storage but is supposed to
// be a uint8_t value. We need to check that value in the configuration
- // does not exceed range of uint16_t and is not zero.
+ // does not exceed range of uint8_t and is not zero.
uint32_t option_code = getParam<uint32_t>("code", uint32_values_);
if (option_code == 0) {
- isc_throw(DhcpConfigError, "Parser error: value of 'code' must not"
- << " be equal to zero. Option code '0' is reserved in"
- << " DHCPv4.");
- } else if (option_code > std::numeric_limits<uint16_t>::max()) {
- isc_throw(DhcpConfigError, "Parser error: value of 'code' must not"
- << " exceed " << std::numeric_limits<uint16_t>::max());
+ isc_throw(DhcpConfigError, "option code must not be zero."
+ << " Option code '0' is reserved in DHCPv4.");
+ } else if (option_code > std::numeric_limits<uint8_t>::max()) {
+ isc_throw(DhcpConfigError, "invalid option code '" << option_code
+ << "', it must not exceed '"
+ << std::numeric_limits<uint8_t>::max() << "'");
}
// Check that the option name has been specified, is non-empty and does not
// contain spaces.
- // @todo possibly some more restrictions apply here?
std::string option_name = getParam<std::string>("name", string_values_);
if (option_name.empty()) {
- isc_throw(DhcpConfigError, "Parser error: option name must not be"
- << " empty");
+ isc_throw(DhcpConfigError, "name of the option with code '"
+ << option_code << "' is empty");
} else if (option_name.find(" ") != std::string::npos) {
- isc_throw(DhcpConfigError, "Parser error: option name must not contain"
- << " spaces");
+ isc_throw(DhcpConfigError, "invalid option name '" << option_name
+ << "', space character is not allowed");
}
std::string option_space = getParam<std::string>("space", string_values_);
- /// @todo Validate option space once #2313 is merged.
+ if (!OptionSpace::validateName(option_space)) {
+ isc_throw(DhcpConfigError, "invalid option space name '"
+ << option_space << "' specified for option '"
+ << option_name << "' (code '" << option_code
+ << "')");
+ }
OptionDefinitionPtr def;
if (option_space == "dhcp4" &&
@@ -820,7 +824,7 @@ private:
try {
util::encode::decodeHex(option_data, binary);
} catch (...) {
- isc_throw(DhcpConfigError, "Parser error: option data is not a valid"
+ isc_throw(DhcpConfigError, "option data is not a valid"
<< " string of hexadecimal digits: " << option_data);
}
}
@@ -855,7 +859,7 @@ private:
// definition of option value makes sense.
if (def->getName() != option_name) {
isc_throw(DhcpConfigError, "specified option name '"
- << option_name << " does not match the "
+ << option_name << "' does not match the "
<< "option definition: '" << option_space
<< "." << def->getName() << "'");
}
@@ -875,6 +879,7 @@ private:
<< ", code: " << option_code << "): "
<< ex.what());
}
+
}
// All went good, so we can set the option space name.
option_space_ = option_space;
@@ -962,13 +967,13 @@ public:
return (new OptionDataListParser(param_name));
}
+ /// Pointer to options instances storage.
+ OptionStorage* options_;
/// Intermediate option storage. This storage is used by
/// lower level parsers to add new options. Values held
/// in this storage are assigned to main storage (options_)
/// if overall parsing was successful.
OptionStorage local_options_;
- /// Pointer to options instances storage.
- OptionStorage* options_;
/// Collection of parsers;
ParserCollection parsers_;
};
@@ -976,7 +981,7 @@ public:
/// @brief Parser for a single option definition.
///
/// This parser creates an instance of a single option definition.
-class OptionDefParser: DhcpConfigParser {
+class OptionDefParser : public DhcpConfigParser {
public:
/// @brief Constructor.
@@ -1003,7 +1008,8 @@ public:
std::string entry(param.first);
ParserPtr parser;
if (entry == "name" || entry == "type" ||
- entry == "record-types" || entry == "space") {
+ entry == "record-types" || entry == "space" ||
+ entry == "encapsulate") {
StringParserPtr
str_parser(dynamic_cast<StringParser*>(StringParser::factory(entry)));
if (str_parser) {
@@ -1053,8 +1059,8 @@ public:
/// @brief Stores the parsed option definition in a storage.
void commit() {
- // @todo validate option space name once 2313 is merged.
- if (storage_ && option_definition_) {
+ if (storage_ && option_definition_ &&
+ OptionSpace::validateName(option_space_name_)) {
storage_->addItem(option_definition_, option_space_name_);
}
}
@@ -1076,11 +1082,10 @@ private:
void createOptionDef() {
// Get the option space name and validate it.
std::string space = getParam<std::string>("space", string_values_);
- // @todo uncomment the code below when the #2313 is merged.
- /* if (!OptionSpace::validateName()) {
+ if (!OptionSpace::validateName(space)) {
isc_throw(DhcpConfigError, "invalid option space name '"
<< space << "'");
- } */
+ }
// Get other parameters that are needed to create the
// option definition.
@@ -1088,9 +1093,35 @@ private:
uint32_t code = getParam<uint32_t>("code", uint32_values_);
std::string type = getParam<std::string>("type", string_values_);
bool array_type = getParam<bool>("array", boolean_values_);
+ std::string encapsulates = getParam<std::string>("encapsulate",
+ string_values_);
- OptionDefinitionPtr def(new OptionDefinition(name, code,
- type, array_type));
+ // Create option definition.
+ OptionDefinitionPtr def;
+ // We need to check if user has set encapsulated option space
+ // name. If so, different constructor will be used.
+ if (!encapsulates.empty()) {
+ // Arrays can't be used together with sub-options.
+ if (array_type) {
+ isc_throw(DhcpConfigError, "option '" << space << "."
+ << name << "', comprising an array of data"
+ << " fields may not encapsulate any option space");
+
+ } else if (encapsulates == space) {
+ isc_throw(DhcpConfigError, "option must not encapsulate"
+ << " an option space it belongs to: '"
+ << space << "." << name << "' is set to"
+ << " encapsulate '" << space << "'");
+
+ } else {
+ def.reset(new OptionDefinition(name, code, type,
+ encapsulates.c_str()));
+ }
+
+ } else {
+ def.reset(new OptionDefinition(name, code, type, array_type));
+
+ }
// The record-types field may carry a list of comma separated names
// of data types that form a record.
std::string record_types = getParam<std::string>("record-types",
@@ -1108,7 +1139,7 @@ private:
}
} catch (const Exception& ex) {
isc_throw(DhcpConfigError, "invalid record type values"
- << " specified for the option definition: "
+ << " specified for the option definition: "
<< ex.what());
}
}
@@ -1330,6 +1361,63 @@ private:
return (false);
}
+ /// @brief Append sub-options to an option.
+ ///
+ /// @param option_space a name of the encapsulated option space.
+ /// @param option option instance to append sub-options to.
+ void appendSubOptions(const std::string& option_space, OptionPtr& option) {
+ // Only non-NULL options are stored in option container.
+ // If this option pointer is NULL this is a serious error.
+ assert(option);
+
+ OptionDefinitionPtr def;
+ if (option_space == "dhcp4" &&
+ LibDHCP::isStandardOption(Option::V4, option->getType())) {
+ def = LibDHCP::getOptionDef(Option::V4, option->getType());
+ // Definitions for some of the standard options have not been
+ // implemented yet, so it is ok to return here.
+ if (!def) {
+ return;
+ }
+ } else {
+ const OptionDefContainerPtr defs =
+ option_def_intermediate.getItems(option_space);
+ const OptionDefContainerTypeIndex& idx = defs->get<1>();
+ const OptionDefContainerTypeRange& range =
+ idx.equal_range(option->getType());
+ // There is no definition so we have to leave.
+ if (std::distance(range.first, range.second) == 0) {
+ return;
+ }
+
+ def = *range.first;
+
+ // If the definition exists, it must be non-NULL.
+ // Otherwise it is a programming error.
+ assert(def);
+ }
+
+ // The option definition tells us whether this option
+ // encapsulates any option space (i.e. carries sub-options).
+ // Get the encapsulated option space name.
+ std::string encapsulated_space = def->getEncapsulatedSpace();
+ // If option space name is empty it means that our option does not
+ // encapsulate any option space (does not include sub-options).
+ if (!encapsulated_space.empty()) {
+ // Get the sub-options that belong to the encapsulated
+ // option space.
+ const Subnet::OptionContainerPtr sub_opts =
+ option_defaults.getItems(encapsulated_space);
+ // Append sub-options to the option.
+ BOOST_FOREACH(Subnet::OptionDescriptor desc, *sub_opts) {
+ if (desc.option) {
+ option->addOption(desc.option);
+ }
+ }
+ }
+ }
+
/// @brief Create a new subnet using a data from child parsers.
///
/// @throw isc::dhcp::DhcpConfigError if subnet configuration parsing failed.
@@ -1405,6 +1493,8 @@ private:
LOG_WARN(dhcp4_logger, DHCP4_CONFIG_OPTION_DUPLICATE)
.arg(desc.option->getType()).arg(addr.toText());
}
+ // Add sub-options (if any).
+ appendSubOptions(option_space, desc.option);
// In any case, we add the option to the subnet.
subnet_->addOption(desc.option, false, option_space);
}
@@ -1432,6 +1522,9 @@ private:
Subnet::OptionDescriptor existing_desc =
subnet_->getOptionDescriptor(option_space, desc.option->getType());
if (!existing_desc.option) {
+ // Add sub-options (if any).
+ appendSubOptions(option_space, desc.option);
+
subnet_->addOption(desc.option, false, option_space);
}
}
@@ -1762,7 +1855,7 @@ configureDhcp4Server(Dhcpv4Srv&, ConstElementPtr config_set) {
LOG_INFO(dhcp4_logger, DHCP4_CONFIG_COMPLETE).arg(config_details);
// Everything was fine. Configuration is successful.
- answer = isc::config::createAnswer(0, "Configuration commited.");
+ answer = isc::config::createAnswer(0, "Configuration committed.");
return (answer);
}
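
Note: the new checks in createOption() and createOptionDef() above boil down to a code-range test and an option-space-name test. The following standalone sketch restates that logic in plain C++ for illustration only; isValidV4OptionCode and isValidSpaceName are made-up helper names, not the parser's API, and the name rule is an assumption (OptionSpace::validateName may accept a different character set).

    #include <cctype>
    #include <cstdint>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for OptionSpace::validateName(): non-empty,
    // letters, digits, hyphens and underscores only (assumed rule).
    bool isValidSpaceName(const std::string& name) {
        if (name.empty()) {
            return false;
        }
        for (char c : name) {
            if (!std::isalnum(static_cast<unsigned char>(c)) &&
                c != '-' && c != '_') {
                return false;
            }
        }
        return true;
    }

    // DHCPv4 option codes fit in a single octet; code 0 is reserved.
    bool isValidV4OptionCode(uint32_t code) {
        return (code != 0 && code <= 255);
    }

    int main() {
        std::cout << isValidV4OptionCode(222) << " "          // 1: fits in uint8_t
                  << isValidV4OptionCode(300) << " "          // 0: exceeds 255
                  << isValidSpaceName("isc") << " "           // 1: valid name
                  << isValidSpaceName("invalid%space") << "\n"; // 0: '%' rejected
        return 0;
    }
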
diff --git a/src/bin/dhcp4/ctrl_dhcp4_srv.cc b/src/bin/dhcp4/ctrl_dhcp4_srv.cc
index 435a25e..8ac49d3 100644
--- a/src/bin/dhcp4/ctrl_dhcp4_srv.cc
+++ b/src/bin/dhcp4/ctrl_dhcp4_srv.cc
@@ -47,18 +47,61 @@ namespace dhcp {
ControlledDhcpv4Srv* ControlledDhcpv4Srv::server_ = NULL;
ConstElementPtr
+ControlledDhcpv4Srv::dhcp4StubConfigHandler(ConstElementPtr) {
+ // This configuration handler is intended to be used only
+ // when the initial configuration comes in. To receive this
+ // configuration a pointer to this handler must be passed
+ // using ModuleCCSession constructor. This constructor will
+ // invoke the handler and will store the configuration for
+ // the configuration session when the handler returns success.
+ // Since this configuration is partial we just pretend to
+ // parse it and always return success. The function that
+ // initiates the session must get the configuration on its
+ // own using getFullConfig.
+ return (isc::config::createAnswer(0, "Configuration accepted."));
+}
+
+ConstElementPtr
ControlledDhcpv4Srv::dhcp4ConfigHandler(ConstElementPtr new_config) {
- LOG_DEBUG(dhcp4_logger, DBG_DHCP4_COMMAND, DHCP4_CONFIG_UPDATE)
- .arg(new_config->str());
- if (server_) {
- return (configureDhcp4Server(*server_, new_config));
+ if (!server_ || !server_->config_session_) {
+ // That should never happen as we install config_handler
+ // after we instantiate the server.
+ ConstElementPtr answer =
+ isc::config::createAnswer(1, "Configuration rejected,"
+ " server is in startup/shutdown phase.");
+ return (answer);
}
- // That should never happen as we install config_handler after we instantiate
- // the server.
- ConstElementPtr answer = isc::config::createAnswer(1,
- "Configuration rejected, server is during startup/shutdown phase.");
- return (answer);
+ // The configuration passed to this handler function is partial.
+ // In other words, it just includes the values being modified.
+ // At the same time, there are dependencies between various
+ // DHCP configuration parsers. For example: an option value can
+ // only be set if the definition of this option is set. If someone removes
+ // an existing option definition then the partial configuration that
+ // removes that definition is triggered while a relevant option value
+ // may remain configured. This eventually leaves the DHCP server
+ // configuration in an inconsistent state.
+ // In order to work around this problem we need to merge the new
+ // configuration with the existing (full) configuration.
+
+ // Let's create a new object that will hold the merged configuration.
+ boost::shared_ptr<MapElement> merged_config(new MapElement());
+ // Let's get the existing configuration.
+ ConstElementPtr full_config = server_->config_session_->getFullConfig();
+ // The full_config and merged_config should be always non-NULL
+ // but to provide some level of exception safety we check that they
+ // really are (in case we go out of memory).
+ if (full_config && merged_config) {
+ merged_config->setValue(full_config->mapValue());
+
+ // Merge an existing and new configuration.
+ isc::data::merge(merged_config, new_config);
+ LOG_DEBUG(dhcp4_logger, DBG_DHCP4_COMMAND, DHCP4_CONFIG_UPDATE)
+ .arg(full_config->str());
+ }
+
+ // Configure the server.
+ return (configureDhcp4Server(*server_, merged_config));
}
ConstElementPtr
@@ -109,8 +152,15 @@ void ControlledDhcpv4Srv::establishSession() {
LOG_DEBUG(dhcp4_logger, DBG_DHCP4_START, DHCP4_CCSESSION_STARTING)
.arg(specfile);
cc_session_ = new Session(io_service_.get_io_service());
+ // Create a session with the dummy configuration handler.
+ // The dummy configuration handler is invoked internally by the
+ // constructor and, on success, the constructor updates
+ // the current session with the configuration that had been
+ // committed in the previous session. If we did not install
+ // the dummy handler, the previous configuration would have
+ // been lost.
config_session_ = new ModuleCCSession(specfile, *cc_session_,
- NULL,
+ dhcp4StubConfigHandler,
dhcp4CommandHandler, false);
config_session_->start();
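
Note: the essence of the new dhcp4ConfigHandler is "overlay the partial update on the previously committed full configuration before parsing". A minimal sketch of that idea follows; it uses plain std::map of strings instead of isc::data::Element trees, and mergeConfig is a made-up name, so it only illustrates the top-level behaviour of isc::data::merge() for flat keys.

    #include <iostream>
    #include <map>
    #include <string>

    // Start from the full (previously committed) configuration and overlay
    // the partial update on top of it, so values the update does not mention
    // are preserved.
    std::map<std::string, std::string>
    mergeConfig(std::map<std::string, std::string> full,
                const std::map<std::string, std::string>& partial) {
        for (const auto& kv : partial) {
            full[kv.first] = kv.second;   // the update overrides or adds a key
        }
        return full;
    }

    int main() {
        std::map<std::string, std::string> full = {
            {"renew-timer", "1000"}, {"rebind-timer", "2000"}};
        std::map<std::string, std::string> partial = {{"rebind-timer", "3000"}};
        for (const auto& kv : mergeConfig(full, partial)) {
            std::cout << kv.first << "=" << kv.second << "\n";
        }
        // renew-timer=1000 is kept even though the update did not mention it.
        return 0;
    }
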
diff --git a/src/bin/dhcp4/ctrl_dhcp4_srv.h b/src/bin/dhcp4/ctrl_dhcp4_srv.h
index 9bd261c..ac15c44 100644
--- a/src/bin/dhcp4/ctrl_dhcp4_srv.h
+++ b/src/bin/dhcp4/ctrl_dhcp4_srv.h
@@ -49,7 +49,7 @@ public:
/// @brief Establishes msgq session.
///
/// Creates session that will be used to receive commands and updated
- /// configuration from boss (or indirectly from user via bindctl).
+ /// configuration from cfgmgr (or indirectly from user via bindctl).
void establishSession();
/// @brief Terminates existing msgq session.
@@ -94,6 +94,27 @@ protected:
static isc::data::ConstElementPtr
dhcp4ConfigHandler(isc::data::ConstElementPtr new_config);
+ /// @brief A dummy configuration handler that always returns success.
+ ///
+ /// This configuration handler does not perform configuration
+ /// parsing and always returns success. A dummy handler should
+ /// be installed using the \ref isc::config::ModuleCCSession ctor
+ /// to get the initial configuration. This initial configuration
+ /// comprises values for only those elements that were modified
+ /// in the previous session. The \ref dhcp4ConfigHandler can't be
+ /// used to parse the initial configuration because it needs the
+ /// full configuration to satisfy dependencies between the
+ /// various configuration values. Installing the dummy handler
+ /// that is guaranteed to return success causes the initial configuration
+ /// to be stored for the session being created, so that it can
+ /// later be accessed with \ref isc::ConfigData::getFullConfig.
+ ///
+ /// @param new_config new configuration.
+ ///
+ /// @return success configuration status.
+ static isc::data::ConstElementPtr
+ dhcp4StubConfigHandler(isc::data::ConstElementPtr new_config);
+
/// @brief A callback for handling incoming commands.
///
/// @param command textual representation of the command
diff --git a/src/bin/dhcp4/dhcp4.spec b/src/bin/dhcp4/dhcp4.spec
index b139d82..59d727e 100644
--- a/src/bin/dhcp4/dhcp4.spec
+++ b/src/bin/dhcp4/dhcp4.spec
@@ -70,7 +70,7 @@
"item_default": False
},
- { "item_name": "record_types",
+ { "item_name": "record-types",
"item_type": "string",
"item_optional": false,
"item_default": ""
@@ -80,6 +80,12 @@
"item_type": "string",
"item_optional": false,
"item_default": ""
+ },
+
+ { "item_name": "encapsulate",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
} ]
}
},
diff --git a/src/bin/dhcp4/dhcp4_srv.cc b/src/bin/dhcp4/dhcp4_srv.cc
index 20d8597..b2a92e9 100644
--- a/src/bin/dhcp4/dhcp4_srv.cc
+++ b/src/bin/dhcp4/dhcp4_srv.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -17,6 +17,7 @@
#include <dhcp/iface_mgr.h>
#include <dhcp/option4_addrlst.h>
#include <dhcp/option_int.h>
+#include <dhcp/option_int_array.h>
#include <dhcp/pkt4.h>
#include <dhcp/duid.h>
#include <dhcp/hwaddr.h>
@@ -41,11 +42,20 @@ using namespace isc::dhcp;
using namespace isc::log;
using namespace std;
+namespace isc {
+namespace dhcp {
+
+/// @brief file name of a server-id file
+///
+/// The server must store its server identifier in persistent storage that must
+/// not change between restarts. This is the name of the file created in dataDir
+/// (see isc::dhcp::CfgMgr::getDataDir()). It is a text file that holds a
+/// regular IPv4 address, e.g. 192.0.2.1. The server creates it during the
+/// first run and then uses it afterwards.
+static const char* SERVER_ID_FILE = "b10-dhcp4-serverid";
+
// These are hardcoded parameters. Currently this is a skeleton server that only
// grants those options and a single, fixed, hardcoded lease.
-const std::string HARDCODED_GATEWAY = "192.0.2.1";
-const std::string HARDCODED_DNS_SERVER = "192.0.2.2";
-const std::string HARDCODED_DOMAIN_NAME = "isc.example.com";
Dhcpv4Srv::Dhcpv4Srv(uint16_t port, const char* dbconfig) {
LOG_DEBUG(dhcp4_logger, DBG_DHCP4_START, DHCP4_OPEN_SOCKET).arg(port);
@@ -63,6 +73,7 @@ Dhcpv4Srv::Dhcpv4Srv(uint16_t port, const char* dbconfig) {
string srvid_file = CfgMgr::instance().getDataDir() + "/" + string(SERVER_ID_FILE);
if (loadServerID(srvid_file)) {
LOG_DEBUG(dhcp4_logger, DBG_DHCP4_START, DHCP4_SERVERID_LOADED)
+ .arg(srvidToString(getServerID()))
.arg(srvid_file);
} else {
generateServerID();
@@ -99,7 +110,8 @@ Dhcpv4Srv::~Dhcpv4Srv() {
IfaceMgr::instance().closeSockets();
}
-void Dhcpv4Srv::shutdown() {
+void
+Dhcpv4Srv::shutdown() {
LOG_DEBUG(dhcp4_logger, DBG_DHCP4_BASIC, DHCP4_SHUTDOWN_REQUEST);
shutdown_ = true;
}
@@ -200,7 +212,8 @@ Dhcpv4Srv::run() {
return (true);
}
-bool Dhcpv4Srv::loadServerID(const std::string& file_name) {
+bool
+Dhcpv4Srv::loadServerID(const std::string& file_name) {
// load content of the file into a string
fstream f(file_name.c_str(), ios::in);
@@ -234,7 +247,8 @@ bool Dhcpv4Srv::loadServerID(const std::string& file_name) {
return (true);
}
-void Dhcpv4Srv::generateServerID() {
+void
+Dhcpv4Srv::generateServerID() {
const IfaceMgr::IfaceCollection& ifaces = IfaceMgr::instance().getIfaces();
@@ -271,16 +285,19 @@ void Dhcpv4Srv::generateServerID() {
isc_throw(BadValue, "No suitable interfaces for server-identifier found");
}
-bool Dhcpv4Srv::writeServerID(const std::string& file_name) {
+bool
+Dhcpv4Srv::writeServerID(const std::string& file_name) {
fstream f(file_name.c_str(), ios::out | ios::trunc);
if (!f.good()) {
return (false);
}
f << srvidToString(getServerID());
f.close();
+ return (true);
}
-string Dhcpv4Srv::srvidToString(const OptionPtr& srvid) {
+string
+Dhcpv4Srv::srvidToString(const OptionPtr& srvid) {
if (!srvid) {
isc_throw(BadValue, "NULL pointer passed to srvidToString()");
}
@@ -299,7 +316,8 @@ string Dhcpv4Srv::srvidToString(const OptionPtr& srvid) {
return (addrs[0].toText());
}
-void Dhcpv4Srv::copyDefaultFields(const Pkt4Ptr& question, Pkt4Ptr& answer) {
+void
+Dhcpv4Srv::copyDefaultFields(const Pkt4Ptr& question, Pkt4Ptr& answer) {
answer->setIface(question->getIface());
answer->setIndex(question->getIndex());
answer->setCiaddr(question->getCiaddr());
@@ -328,7 +346,8 @@ void Dhcpv4Srv::copyDefaultFields(const Pkt4Ptr& question, Pkt4Ptr& answer) {
}
}
-void Dhcpv4Srv::appendDefaultOptions(Pkt4Ptr& msg, uint8_t msg_type) {
+void
+Dhcpv4Srv::appendDefaultOptions(Pkt4Ptr& msg, uint8_t msg_type) {
OptionPtr opt;
// add Message Type Option (type 53)
@@ -340,22 +359,80 @@ void Dhcpv4Srv::appendDefaultOptions(Pkt4Ptr& msg, uint8_t msg_type) {
// more options will be added here later
}
+void
+Dhcpv4Srv::appendRequestedOptions(const Pkt4Ptr& question, Pkt4Ptr& msg) {
-void Dhcpv4Srv::appendRequestedOptions(Pkt4Ptr& msg) {
- OptionPtr opt;
+ // Get the subnet relevant for the client. We will need it
+ // to get the options associated with it.
+ Subnet4Ptr subnet = selectSubnet(question);
+ // If we can't find the subnet for the client there is no way
+ // to get the options to be sent to a client. We don't log an
+ // error because it will be logged by the assignLease method
+ // anyway.
+ if (!subnet) {
+ return;
+ }
+
+ // try to get the 'Parameter Request List' option which holds the
+ // codes of requested options.
+ OptionUint8ArrayPtr option_prl = boost::dynamic_pointer_cast<
+ OptionUint8Array>(question->getOption(DHO_DHCP_PARAMETER_REQUEST_LIST));
+ // If there is no PRL option in the message from the client then
+ // there is nothing to do.
+ if (!option_prl) {
+ return;
+ }
+
+ // Get the codes of requested options.
+ const std::vector<uint8_t>& requested_opts = option_prl->getValues();
+ // For each requested option code get the instance of the option
+ // to be returned to the client.
+ for (std::vector<uint8_t>::const_iterator opt = requested_opts.begin();
+ opt != requested_opts.end(); ++opt) {
+ Subnet::OptionDescriptor desc =
+ subnet->getOptionDescriptor("dhcp4", *opt);
+ if (desc.option) {
+ msg->addOption(desc.option);
+ }
+ }
+}
+
+void
+Dhcpv4Srv::appendBasicOptions(const Pkt4Ptr& question, Pkt4Ptr& msg) {
+ // Identify options that we always want to send to the
+ // client (if they are configured).
+ static const uint16_t required_options[] = {
+ DHO_SUBNET_MASK,
+ DHO_ROUTERS,
+ DHO_DOMAIN_NAME_SERVERS,
+ DHO_DOMAIN_NAME };
+
+ static size_t required_options_size =
+ sizeof(required_options) / sizeof(required_options[0]);
- // Domain name (type 15)
- vector<uint8_t> domain(HARDCODED_DOMAIN_NAME.begin(), HARDCODED_DOMAIN_NAME.end());
- opt = OptionPtr(new Option(Option::V4, DHO_DOMAIN_NAME, domain));
- msg->addOption(opt);
- // TODO: Add Option_String class
+ // Get the subnet.
+ Subnet4Ptr subnet = selectSubnet(question);
+ if (!subnet) {
+ return;
+ }
- // DNS servers (type 6)
- opt = OptionPtr(new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress(HARDCODED_DNS_SERVER)));
- msg->addOption(opt);
+ // Try to find all 'required' options in the outgoing
+ // message. Those that are not present will be added.
+ for (int i = 0; i < required_options_size; ++i) {
+ OptionPtr opt = msg->getOption(required_options[i]);
+ if (!opt) {
+ // Check whether option has been configured.
+ Subnet::OptionDescriptor desc =
+ subnet->getOptionDescriptor("dhcp4", required_options[i]);
+ if (desc.option) {
+ msg->addOption(desc.option);
+ }
+ }
+ }
}
-void Dhcpv4Srv::assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer) {
+void
+Dhcpv4Srv::assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer) {
// We need to select a subnet the client is connected in.
Subnet4Ptr subnet = selectSubnet(question);
@@ -424,10 +501,12 @@ void Dhcpv4Srv::assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer) {
opt->setUint32(lease->valid_lft_);
answer->addOption(opt);
- // @todo: include real router information here
// Router (type 3)
- opt = OptionPtr(new Option4AddrLst(DHO_ROUTERS, IOAddress(HARDCODED_GATEWAY)));
- answer->addOption(opt);
+ Subnet::OptionDescriptor opt_routers =
+ subnet->getOptionDescriptor("dhcp4", DHO_ROUTERS);
+ if (opt_routers.option) {
+ answer->addOption(opt_routers.option);
+ }
// Subnet mask (type 1)
answer->addOption(getNetmaskOption(subnet));
@@ -451,7 +530,8 @@ void Dhcpv4Srv::assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer) {
}
}
-OptionPtr Dhcpv4Srv::getNetmaskOption(const Subnet4Ptr& subnet) {
+OptionPtr
+Dhcpv4Srv::getNetmaskOption(const Subnet4Ptr& subnet) {
uint32_t netmask = getNetmask4(subnet->get().second);
OptionPtr opt(new OptionInt<uint32_t>(Option::V4,
@@ -460,33 +540,46 @@ OptionPtr Dhcpv4Srv::getNetmaskOption(const Subnet4Ptr& subnet) {
return (opt);
}
-Pkt4Ptr Dhcpv4Srv::processDiscover(Pkt4Ptr& discover) {
+Pkt4Ptr
+Dhcpv4Srv::processDiscover(Pkt4Ptr& discover) {
Pkt4Ptr offer = Pkt4Ptr
(new Pkt4(DHCPOFFER, discover->getTransid()));
copyDefaultFields(discover, offer);
appendDefaultOptions(offer, DHCPOFFER);
- appendRequestedOptions(offer);
+ appendRequestedOptions(discover, offer);
assignLease(discover, offer);
+ // There are a few basic options that we always want to
+ // include in the response. If the client did not request
+ // them, we append them anyway.
+ appendBasicOptions(discover, offer);
+
return (offer);
}
-Pkt4Ptr Dhcpv4Srv::processRequest(Pkt4Ptr& request) {
+Pkt4Ptr
+Dhcpv4Srv::processRequest(Pkt4Ptr& request) {
Pkt4Ptr ack = Pkt4Ptr
(new Pkt4(DHCPACK, request->getTransid()));
copyDefaultFields(request, ack);
appendDefaultOptions(ack, DHCPACK);
- appendRequestedOptions(ack);
+ appendRequestedOptions(request, ack);
assignLease(request, ack);
+ // There are a few basic options that we always want to
+ // include in the response. If the client did not request
+ // them, we append them anyway.
+ appendBasicOptions(request, ack);
+
return (ack);
}
-void Dhcpv4Srv::processRelease(Pkt4Ptr& release) {
+void
+Dhcpv4Srv::processRelease(Pkt4Ptr& release) {
// Try to find client-id
ClientIdPtr client_id;
@@ -554,11 +647,13 @@ void Dhcpv4Srv::processRelease(Pkt4Ptr& release) {
}
-void Dhcpv4Srv::processDecline(Pkt4Ptr& decline) {
+void
+Dhcpv4Srv::processDecline(Pkt4Ptr& /* decline */) {
/// TODO: Implement this.
}
-Pkt4Ptr Dhcpv4Srv::processInform(Pkt4Ptr& inform) {
+Pkt4Ptr
+Dhcpv4Srv::processInform(Pkt4Ptr& inform) {
/// TODO: Currently implemented echo mode. Implement this for real
return (inform);
}
@@ -594,7 +689,8 @@ Dhcpv4Srv::serverReceivedPacketName(uint8_t type) {
return (UNKNOWN);
}
-Subnet4Ptr Dhcpv4Srv::selectSubnet(const Pkt4Ptr& question) {
+Subnet4Ptr
+Dhcpv4Srv::selectSubnet(const Pkt4Ptr& question) {
// Is this relayed message?
IOAddress relay = question->getGiaddr();
@@ -609,7 +705,8 @@ Subnet4Ptr Dhcpv4Srv::selectSubnet(const Pkt4Ptr& question) {
}
}
-void Dhcpv4Srv::sanityCheck(const Pkt4Ptr& pkt, RequirementLevel serverid) {
+void
+Dhcpv4Srv::sanityCheck(const Pkt4Ptr& pkt, RequirementLevel serverid) {
OptionPtr server_id = pkt->getOption(DHO_DHCP_SERVER_IDENTIFIER);
switch (serverid) {
case FORBIDDEN:
@@ -632,3 +729,6 @@ void Dhcpv4Srv::sanityCheck(const Pkt4Ptr& pkt, RequirementLevel serverid) {
;
}
}
+
+} // namespace dhcp
+} // namespace isc
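
Note: the option-selection logic added to the v4 server above has two parts: copy every configured option whose code appears in the client's Parameter Request List (option 55), then append a small set of basic options even if they were not requested. The sketch below restates that with plain containers instead of Pkt4/Subnet4; the option "values" are just placeholder strings and the code lists are illustrative.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    int main() {
        // Options configured for the subnet, keyed by option code.
        std::map<uint16_t, std::string> configured = {
            {1, "subnet-mask"}, {3, "routers"}, {6, "dns-servers"},
            {15, "domain-name"}, {7, "log-servers"}};

        // Codes from the client's PRL; 9 (lpr-servers) is not configured,
        // so it must not appear in the response.
        std::vector<uint8_t> prl = {6, 15, 9};

        // Basic options appended even when not requested (if configured).
        const uint16_t basic[] = {1, 3, 6, 15};

        std::map<uint16_t, std::string> response;
        for (uint8_t code : prl) {                 // requested options
            auto it = configured.find(code);
            if (it != configured.end()) {
                response.insert(*it);
            }
        }
        for (uint16_t code : basic) {              // basic options, if missing
            auto it = configured.find(code);
            if (response.count(code) == 0 && it != configured.end()) {
                response.insert(*it);
            }
        }
        for (const auto& kv : response) {
            std::cout << kv.first << " -> " << kv.second << "\n";
        }
        return 0;
    }
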
diff --git a/src/bin/dhcp4/dhcp4_srv.h b/src/bin/dhcp4/dhcp4_srv.h
index 8d26e05..1c988b1 100644
--- a/src/bin/dhcp4/dhcp4_srv.h
+++ b/src/bin/dhcp4/dhcp4_srv.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -28,15 +28,6 @@
namespace isc {
namespace dhcp {
-/// @brief file name of a server-id file
-///
-/// Server must store its server identifier in persistent storage that must not
-/// change between restarts. This is name of the file that is created in dataDir
-/// (see isc::dhcp::CfgMgr::getDataDir()). It is a text file that uses
-/// regular IPv4 address, e.g. 192.0.2.1. Server will create it during
-/// first run and then use it afterwards.
-static const char* SERVER_ID_FILE = "b10-dhcp4-serverid";
-
/// @brief DHCPv4 server service.
///
/// This singleton class represents DHCPv4 server. It contains all
@@ -182,8 +173,9 @@ protected:
/// This method assigns options that were requested by client
/// (sent in PRL) or are enforced by server.
///
+ /// @param question DISCOVER or REQUEST message from a client.
/// @param msg outgoing message (options will be added here)
- void appendRequestedOptions(Pkt4Ptr& msg);
+ void appendRequestedOptions(const Pkt4Ptr& question, Pkt4Ptr& msg);
/// @brief Assigns a lease and appends corresponding options
///
@@ -195,6 +187,19 @@ protected:
/// @param answer OFFER or ACK/NAK message (lease options will be added here)
void assignLease(const Pkt4Ptr& question, Pkt4Ptr& answer);
+ /// @brief Append basic options if they are not present.
+ ///
+ /// This function adds the following basic options if they
+ /// are not yet added to the message:
+ /// - Subnet Mask,
+ /// - Router,
+ /// - Name Server,
+ /// - Domain Name.
+ ///
+ /// @param question DISCOVER or REQUEST message from a client.
+ /// @param msg the message to add options to.
+ void appendBasicOptions(const Pkt4Ptr& question, Pkt4Ptr& msg);
+
/// @brief Attempts to renew received addresses
///
/// Attempts to renew existing lease. This typically includes finding a lease that
diff --git a/src/bin/dhcp4/tests/Makefile.am b/src/bin/dhcp4/tests/Makefile.am
index c0ebcb9..73bf00b 100644
--- a/src/bin/dhcp4/tests/Makefile.am
+++ b/src/bin/dhcp4/tests/Makefile.am
@@ -34,6 +34,10 @@ AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
CLEANFILES = $(builddir)/interfaces.txt $(builddir)/logger_lockfile
AM_CXXFLAGS = $(B10_CXXFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some Boost headers when compiling with clang
+AM_CXXFLAGS += -Wno-unused-parameter
+endif
if USE_STATIC_LINK
AM_LDFLAGS = -static
@@ -56,12 +60,6 @@ dhcp4_unittests_SOURCES += ctrl_dhcp4_srv_unittest.cc
dhcp4_unittests_SOURCES += config_parser_unittest.cc
nodist_dhcp4_unittests_SOURCES = ../dhcp4_messages.h ../dhcp4_messages.cc
-if USE_CLANGPP
-# Disable unused parameter warning caused by some of the
-# Boost headers when compiling with clang.
-dhcp4_unittests_CXXFLAGS = -Wno-unused-parameter
-endif
-
dhcp4_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
dhcp4_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
dhcp4_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/dhcp4/tests/config_parser_unittest.cc b/src/bin/dhcp4/tests/config_parser_unittest.cc
index ba14edf..9b3be51 100644
--- a/src/bin/dhcp4/tests/config_parser_unittest.cc
+++ b/src/bin/dhcp4/tests/config_parser_unittest.cc
@@ -21,6 +21,8 @@
#include <dhcp4/dhcp4_srv.h>
#include <dhcp4/config_parser.h>
#include <dhcp/option4_addrlst.h>
+#include <dhcp/option_custom.h>
+#include <dhcp/option_int.h>
#include <dhcpsrv/subnet.h>
#include <dhcpsrv/cfgmgr.h>
#include <boost/foreach.hpp>
@@ -61,7 +63,7 @@ public:
EXPECT_EQ(expected_value, it->second);
}
- // Checks if config_result (result of DHCP server configuration) has
+ // Checks if the result of DHCP server configuration has
// expected code (0 for success, other for failures).
// Also stores result in rcode_ and comment_.
void checkResult(ConstElementPtr status, int expected_code) {
@@ -345,7 +347,6 @@ TEST_F(Dhcp4ParserTest, subnetGlobalDefaults) {
" \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
" \"subnet\": \"192.0.2.0/24\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -379,7 +380,6 @@ TEST_F(Dhcp4ParserTest, subnetLocal) {
" \"valid-lifetime\": 4,"
" \"subnet\": \"192.0.2.0/24\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -408,7 +408,6 @@ TEST_F(Dhcp4ParserTest, poolOutOfSubnet) {
" \"pool\": [ \"192.0.4.0/28\" ],"
" \"subnet\": \"192.0.2.0/24\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -433,7 +432,6 @@ TEST_F(Dhcp4ParserTest, poolPrefixLen) {
" \"pool\": [ \"192.0.2.128/28\" ],"
" \"subnet\": \"192.0.2.0/24\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -461,7 +459,8 @@ TEST_F(Dhcp4ParserTest, optionDefIpv4Address) {
" \"type\": \"ipv4-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -474,6 +473,7 @@ TEST_F(Dhcp4ParserTest, optionDefIpv4Address) {
ConstElementPtr status;
EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
ASSERT_TRUE(status);
+ checkResult(status, 0);
// The option definition should now be available in the CfgMgr.
def = CfgMgr::instance().getOptionDef("isc", 100);
@@ -484,6 +484,7 @@ TEST_F(Dhcp4ParserTest, optionDefIpv4Address) {
EXPECT_EQ(100, def->getCode());
EXPECT_FALSE(def->getArrayType());
EXPECT_EQ(OPT_IPV4_ADDRESS_TYPE, def->getType());
+ EXPECT_TRUE(def->getEncapsulatedSpace().empty());
}
// The goal of this test is to check whether an option definition
@@ -499,7 +500,8 @@ TEST_F(Dhcp4ParserTest, optionDefRecord) {
" \"type\": \"record\","
" \"array\": False,"
" \"record-types\": \"uint16, ipv4-address, ipv6-address, string\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -523,6 +525,7 @@ TEST_F(Dhcp4ParserTest, optionDefRecord) {
EXPECT_EQ(100, def->getCode());
EXPECT_EQ(OPT_RECORD_TYPE, def->getType());
EXPECT_FALSE(def->getArrayType());
+ EXPECT_TRUE(def->getEncapsulatedSpace().empty());
// The option comprises the record of data fields. Verify that all
// fields are present and they are of the expected types.
@@ -546,7 +549,8 @@ TEST_F(Dhcp4ParserTest, optionDefMultiple) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" },"
" {"
" \"name\": \"foo-2\","
@@ -554,7 +558,8 @@ TEST_F(Dhcp4ParserTest, optionDefMultiple) {
" \"type\": \"ipv4-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -578,6 +583,7 @@ TEST_F(Dhcp4ParserTest, optionDefMultiple) {
EXPECT_EQ(100, def1->getCode());
EXPECT_EQ(OPT_UINT32_TYPE, def1->getType());
EXPECT_FALSE(def1->getArrayType());
+ EXPECT_TRUE(def1->getEncapsulatedSpace().empty());
// Check the second option definition we have created.
OptionDefinitionPtr def2 = CfgMgr::instance().getOptionDef("isc", 101);
@@ -588,6 +594,7 @@ TEST_F(Dhcp4ParserTest, optionDefMultiple) {
EXPECT_EQ(101, def2->getCode());
EXPECT_EQ(OPT_IPV4_ADDRESS_TYPE, def2->getType());
EXPECT_FALSE(def2->getArrayType());
+ EXPECT_TRUE(def2->getEncapsulatedSpace().empty());
}
// The goal of this test is to verify that the duplicated option
@@ -604,7 +611,8 @@ TEST_F(Dhcp4ParserTest, optionDefDuplicate) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" },"
" {"
" \"name\": \"foo-2\","
@@ -612,7 +620,8 @@ TEST_F(Dhcp4ParserTest, optionDefDuplicate) {
" \"type\": \"ipv4-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -640,7 +649,8 @@ TEST_F(Dhcp4ParserTest, optionDefArray) {
" \"type\": \"uint32\","
" \"array\": True,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -664,6 +674,48 @@ TEST_F(Dhcp4ParserTest, optionDefArray) {
EXPECT_EQ(100, def->getCode());
EXPECT_EQ(OPT_UINT32_TYPE, def->getType());
EXPECT_TRUE(def->getArrayType());
+ EXPECT_TRUE(def->getEncapsulatedSpace().empty());
+}
+
+// The purpose of this test is to verify that an encapsulated option
+// space name may be specified.
+TEST_F(Dhcp4ParserTest, optionDefEncapsulate) {
+
+ // Configuration string. It includes the encapsulated
+ // option space name.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"sub-opts-space\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Make sure that the particular option definition does not exist.
+ OptionDefinitionPtr def = CfgMgr::instance().getOptionDef("isc", 100);
+ ASSERT_FALSE(def);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // The option definition should now be available in the CfgMgr.
+ def = CfgMgr::instance().getOptionDef("isc", 100);
+ ASSERT_TRUE(def);
+
+ // Check the option data.
+ EXPECT_EQ("foo", def->getName());
+ EXPECT_EQ(100, def->getCode());
+ EXPECT_EQ(OPT_UINT32_TYPE, def->getType());
+ EXPECT_FALSE(def->getArrayType());
+ EXPECT_EQ("sub-opts-space", def->getEncapsulatedSpace());
}
/// The purpose of this test is to verify that the option definition
@@ -678,7 +730,8 @@ TEST_F(Dhcp4ParserTest, optionDefInvalidName) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -703,7 +756,8 @@ TEST_F(Dhcp4ParserTest, optionDefInvalidType) {
" \"type\": \"sting\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -728,7 +782,62 @@ TEST_F(Dhcp4ParserTest, optionDefInvalidRecordType) {
" \"type\": \"record\","
" \"array\": False,"
" \"record-types\": \"uint32,uint8,sting\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
+
+/// The goal of this test is to verify that the invalid encapsulated
+/// option space name is not accepted.
+TEST_F(Dhcp4ParserTest, optionDefInvalidEncapsulatedSpace) {
+ // Configuration string. The encapsulated option space
+ // name is invalid (% character is not allowed).
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"invalid%space%name\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
+
+/// The goal of this test is to verify that the encapsulated
+/// option space name can't be specified for the option that
+/// comprises an array of data fields.
+TEST_F(Dhcp4ParserTest, optionDefEncapsulatedSpaceAndArray) {
+ // Configuration string. The encapsulated option space
+ // name is set to non-empty value and the array flag
+ // is set.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": True,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"valid-space-name\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -741,6 +850,31 @@ TEST_F(Dhcp4ParserTest, optionDefInvalidRecordType) {
checkResult(status, 1);
}
+/// The goal of this test is to verify that the option may not
+/// encapsulate option space it belongs to.
+TEST_F(Dhcp4ParserTest, optionDefEncapsulateOwnSpace) {
+ // Configuration string. Option is set to encapsulate
+ // option space it belongs to.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"isc\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
/// The purpose of this test is to verify that it is not allowed
/// to override the standard option (that belongs to dhcp4 option
@@ -759,7 +893,8 @@ TEST_F(Dhcp4ParserTest, optionStandardDefOverride) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"dhcp4\""
+ " \"space\": \"dhcp4\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -794,7 +929,8 @@ TEST_F(Dhcp4ParserTest, optionStandardDefOverride) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"dhcp4\""
+ " \"space\": \"dhcp4\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
json = Element::fromJSON(config);
@@ -907,7 +1043,8 @@ TEST_F(Dhcp4ParserTest, optionDataTwoSpaces) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ],"
"\"subnet4\": [ { "
" \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
@@ -940,6 +1077,166 @@ TEST_F(Dhcp4ParserTest, optionDataTwoSpaces) {
ASSERT_FALSE(desc3.option);
}
+// The goal of this test is to verify that it is possible to
+// encapsulate an option space containing some options within
+// another option. In this test we create a base option that
+// encapsulates the option space 'isc', which comprises two other
+// options. Definitions are also created for all the options.
+TEST_F(Dhcp4ParserTest, optionDataEncapsulate) {
+
+ // @todo DHCP configuration has many dependencies between
+ // parameters. First of all, the subnet configuration is
+ // inherited from the global values, so a subnet has to be
+ // configured after all global values have been configured.
+ // Also, an option can encapsulate another option only
+ // if the latter has been configured. For this reason this
+ // test uses a two-stage configuration: first we create the
+ // options that belong to the encapsulated option space,
+ // then in the second stage we add the base option. The Subnet
+ // object is also configured in the second stage so it is created
+ // at the very end (when all other parameters are configured).
+
+ // Starting stage 1. Configure sub-options and their definitions.
+ string config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"foo\","
+ " \"space\": \"isc\","
+ " \"code\": 1,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"isc\","
+ " \"code\": 2,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 1,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 2,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " } ]"
+ "}";
+
+ ConstElementPtr status;
+
+ ElementPtr json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Stage 2. Configure the base option and a subnet. Please note that
+ // the configuration from stage 1 is repeated because the BIND
+ // configuration manager sends the whole configuration for lists
+ // where at least one element is modified or added.
+ config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"base-option\","
+ " \"space\": \"dhcp4\","
+ " \"code\": 222,"
+ " \"data\": \"11\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo\","
+ " \"space\": \"isc\","
+ " \"code\": 1,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"isc\","
+ " \"code\": 2,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"base-option\","
+ " \"code\": 222,"
+ " \"type\": \"uint8\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"dhcp4\","
+ " \"encapsulate\": \"isc\""
+ "},"
+ "{"
+ " \"name\": \"foo\","
+ " \"code\": 1,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 2,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " } ],"
+ "\"subnet4\": [ { "
+ " \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+ " \"subnet\": \"192.0.2.0/24\""
+ " } ]"
+ "}";
+
+
+ json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Get the subnet.
+ Subnet4Ptr subnet = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.5"));
+ ASSERT_TRUE(subnet);
+
+ // We should have one option available.
+ Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp4");
+ ASSERT_TRUE(options);
+ ASSERT_EQ(1, options->size());
+
+ // Get the option.
+ Subnet::OptionDescriptor desc = subnet->getOptionDescriptor("dhcp4", 222);
+ EXPECT_TRUE(desc.option);
+ EXPECT_EQ(222, desc.option->getType());
+
+ // This option should comprise two sub-options.
+ // One of them is 'foo' with code 1.
+ OptionPtr option_foo = desc.option->getOption(1);
+ ASSERT_TRUE(option_foo);
+ EXPECT_EQ(1, option_foo->getType());
+
+ // ...another one 'foo2' with code 2.
+ OptionPtr option_foo2 = desc.option->getOption(2);
+ ASSERT_TRUE(option_foo2);
+ EXPECT_EQ(2, option_foo2->getType());
+}
+
// Goal of this test is to verify options configuration
// for a single subnet. In particular this test checks
// that local options configuration overrides global
@@ -1290,4 +1587,165 @@ TEST_F(Dhcp4ParserTest, DISABLED_Uint32Parser) {
checkResult(status, 1);
}
+// The goal of this test is to verify that the standard option can
+// be configured to encapsulate multiple other options.
+TEST_F(Dhcp4ParserTest, stdOptionDataEncapsulate) {
+
+ // The configuration is a two-stage process in this test.
+ // In the first stage we create definitions of the sub-options
+ // that we will add to the base option.
+ // Let's create some dummy options: foo and foo2.
+ string config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"foo\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"code\": 1,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"code\": 2,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 1,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 2,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"encapsulate\": \"\""
+ " } ]"
+ "}";
+
+ ConstElementPtr status;
+
+ ElementPtr json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Once the definitions have been added we can configure the
+ // standard option 43 (vendor-encapsulated-options). This option
+ // carries sub-options. By convention (introduced in
+ // std_option_defs.h) the option named 'vendor-encapsulated-options'
+ // encapsulates the option space named
+ // 'vendor-encapsulated-options-space'. We add our dummy options
+ // to this option space and thus they should be included as
+ // sub-options in the 'vendor-encapsulated-options' option.
+ config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"vendor-encapsulated-options\","
+ " \"space\": \"dhcp4\","
+ " \"code\": 43,"
+ " \"data\": \"\","
+ " \"csv-format\": False"
+ " },"
+ " {"
+ " \"name\": \"foo\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"code\": 1,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"code\": 2,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 1,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 2,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-encapsulated-options-space\","
+ " \"encapsulate\": \"\""
+ " } ],"
+ "\"subnet4\": [ { "
+ " \"pool\": [ \"192.0.2.1 - 192.0.2.100\" ],"
+ " \"subnet\": \"192.0.2.0/24\""
+ " } ]"
+ "}";
+
+
+ json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp4Server(*srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Get the subnet.
+ Subnet4Ptr subnet = CfgMgr::instance().getSubnet4(IOAddress("192.0.2.5"));
+ ASSERT_TRUE(subnet);
+
+ // We should have one option available.
+ Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp4");
+ ASSERT_TRUE(options);
+ ASSERT_EQ(1, options->size());
+
+ // Get the option.
+ Subnet::OptionDescriptor desc =
+ subnet->getOptionDescriptor("dhcp4", DHO_VENDOR_ENCAPSULATED_OPTIONS);
+ EXPECT_TRUE(desc.option);
+ EXPECT_EQ(DHO_VENDOR_ENCAPSULATED_OPTIONS, desc.option->getType());
+
+ // Option with the code 1 should be added as a sub-option.
+ OptionPtr option_foo = desc.option->getOption(1);
+ ASSERT_TRUE(option_foo);
+ EXPECT_EQ(1, option_foo->getType());
+ // This option comprises a single uint32_t value thus it is
+ // represented by OptionInt<uint32_t> class. Let's get the
+ // object of this type.
+ boost::shared_ptr<OptionInt<uint32_t> > option_foo_uint32 =
+ boost::dynamic_pointer_cast<OptionInt<uint32_t> >(option_foo);
+ ASSERT_TRUE(option_foo_uint32);
+ // Validate the value according to the configuration.
+ EXPECT_EQ(1234, option_foo_uint32->getValue());
+
+ // Option with the code 2 should be added as a sub-option.
+ OptionPtr option_foo2 = desc.option->getOption(2);
+ ASSERT_TRUE(option_foo2);
+ EXPECT_EQ(2, option_foo2->getType());
+ // This option comprises an IPv4 address. Such an option is
+ // represented by an OptionCustom object.
+ OptionCustomPtr option_foo2_v4 =
+ boost::dynamic_pointer_cast<OptionCustom>(option_foo2);
+ ASSERT_TRUE(option_foo2_v4);
+ // Get the IP address carried by this option and validate it.
+ EXPECT_EQ("192.168.2.1", option_foo2_v4->readAddress().toText());
+
+ // Option with the code 3 should not be added.
+ EXPECT_FALSE(desc.option->getOption(3));
+}
+
+
};
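
Note: the new negative tests above (optionDefEncapsulatedSpaceAndArray and optionDefEncapsulateOwnSpace) exercise two constraints on option definitions: an array-type definition may not encapsulate an option space, and a definition may not encapsulate the space it belongs to. A standalone restatement of those checks follows; encapsulationAllowed is an illustrative name, not the parser's API.

    #include <iostream>
    #include <string>

    // Returns true when the combination of option space, encapsulated space
    // and array flag would be accepted by the configuration parser.
    bool encapsulationAllowed(const std::string& space,
                              const std::string& encapsulates,
                              bool array_type) {
        if (encapsulates.empty()) {
            return true;                   // nothing encapsulated, always fine
        }
        if (array_type) {
            return false;                  // arrays can't carry sub-options
        }
        if (encapsulates == space) {
            return false;                  // no self-encapsulation
        }
        return true;
    }

    int main() {
        std::cout << encapsulationAllowed("isc", "sub-opts-space", false) << "\n";  // 1
        std::cout << encapsulationAllowed("isc", "valid-space-name", true) << "\n"; // 0
        std::cout << encapsulationAllowed("isc", "isc", false) << "\n";             // 0
        return 0;
    }
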
diff --git a/src/bin/dhcp4/tests/dhcp4_srv_unittest.cc b/src/bin/dhcp4/tests/dhcp4_srv_unittest.cc
index a2331ff..c938155 100644
--- a/src/bin/dhcp4/tests/dhcp4_srv_unittest.cc
+++ b/src/bin/dhcp4/tests/dhcp4_srv_unittest.cc
@@ -18,6 +18,9 @@
#include <asiolink/io_address.h>
#include <dhcp/dhcp4.h>
#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <dhcp/option_custom.h>
+#include <dhcp/option_int_array.h>
#include <dhcp4/dhcp4_srv.h>
#include <dhcp4/dhcp4_log.h>
#include <dhcpsrv/cfgmgr.h>
@@ -74,14 +77,82 @@ public:
CfgMgr::instance().deleteSubnets4();
CfgMgr::instance().addSubnet4(subnet_);
+ // Add Router option.
+ Option4AddrLstPtr opt_routers(new Option4AddrLst(DHO_ROUTERS));
+ opt_routers->setAddress(IOAddress("192.0.2.2"));
+ subnet_->addOption(opt_routers, false, "dhcp4");
+
// it's ok if that fails. There should not be such a file anyway
unlink(SRVID_FILE);
}
+ /// @brief Add 'Parameter Request List' option to the packet.
+ ///
+ /// This function adds a PRL option comprising the following option codes:
+ /// - 6 - Domain Name Servers
+ /// - 15 - Domain Name
+ /// - 7 - Log Servers
+ /// - 8 - Cookie Servers
+ /// - 9 - LPR Servers
+ ///
+ /// @param pkt packet to add PRL option to.
+ void addPrlOption(Pkt4Ptr& pkt) {
+
+ OptionUint8ArrayPtr option_prl =
+ OptionUint8ArrayPtr(new OptionUint8Array(Option::V4,
+ DHO_DHCP_PARAMETER_REQUEST_LIST));
+
+ // Let's request options that have been configured for the subnet.
+ option_prl->addValue(DHO_DOMAIN_NAME_SERVERS);
+ option_prl->addValue(DHO_DOMAIN_NAME);
+ option_prl->addValue(DHO_LOG_SERVERS);
+ option_prl->addValue(DHO_COOKIE_SERVERS);
+ // Let's also request the option that hasn't been configured. In such
+ // case server should ignore request for this particular option.
+ option_prl->addValue(DHO_LPR_SERVERS);
+ // And add 'Parameter Request List' option into the DISCOVER packet.
+ pkt->addOption(option_prl);
+ }
+
+ /// @brief Configures options being requested in the PRL option.
+ ///
+ /// The lpr-servers option is NOT configured here although it is
+ /// added to the 'Parameter Request List' option in
+ /// \ref addPrlOption. When a requested option is not configured
+ /// the server should not return it in its response. The goal
+ /// of not configuring the requested option is to verify that
+ /// the server will not return it.
+ void configureRequestedOptions() {
+ // dns-servers
+ Option4AddrLstPtr
+ option_dns_servers(new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS));
+ option_dns_servers->addAddress(IOAddress("192.0.2.1"));
+ option_dns_servers->addAddress(IOAddress("192.0.2.100"));
+ ASSERT_NO_THROW(subnet_->addOption(option_dns_servers, false, "dhcp4"));
+
+ // domain-name
+ OptionDefinition def("domain-name", DHO_DOMAIN_NAME, OPT_FQDN_TYPE);
+ boost::shared_ptr<OptionCustom>
+ option_domain_name(new OptionCustom(def, Option::V4));
+ option_domain_name->writeFqdn("example.com");
+ subnet_->addOption(option_domain_name, false, "dhcp4");
+
+ // log-servers
+ Option4AddrLstPtr option_log_servers(new Option4AddrLst(DHO_LOG_SERVERS));
+ option_log_servers->addAddress(IOAddress("192.0.2.2"));
+ option_log_servers->addAddress(IOAddress("192.0.2.10"));
+ ASSERT_NO_THROW(subnet_->addOption(option_log_servers, false, "dhcp4"));
+
+ // cookie-servers
+ Option4AddrLstPtr option_cookie_servers(new Option4AddrLst(DHO_COOKIE_SERVERS));
+ option_cookie_servers->addAddress(IOAddress("192.0.2.1"));
+ ASSERT_NO_THROW(subnet_->addOption(option_cookie_servers, false, "dhcp4"));
+ }
+
/// @brief checks that the response matches request
/// @param q query (client's message)
/// @param a answer (server's message)
- void MessageCheck(const boost::shared_ptr<Pkt4>& q,
+ void messageCheck(const boost::shared_ptr<Pkt4>& q,
const boost::shared_ptr<Pkt4>& a) {
ASSERT_TRUE(q);
ASSERT_TRUE(a);
@@ -91,20 +162,40 @@ public:
EXPECT_EQ(q->getIndex(), a->getIndex());
EXPECT_EQ(q->getGiaddr(), a->getGiaddr());
- // Check that bare minimum of required options are there
+ // Check that bare minimum of required options are there.
+ // We don't check options requested by a client. Those
+ // are checked elsewhere.
EXPECT_TRUE(a->getOption(DHO_SUBNET_MASK));
EXPECT_TRUE(a->getOption(DHO_ROUTERS));
EXPECT_TRUE(a->getOption(DHO_DHCP_SERVER_IDENTIFIER));
EXPECT_TRUE(a->getOption(DHO_DHCP_LEASE_TIME));
EXPECT_TRUE(a->getOption(DHO_SUBNET_MASK));
- EXPECT_TRUE(a->getOption(DHO_ROUTERS));
- EXPECT_TRUE(a->getOption(DHO_DOMAIN_NAME));
- EXPECT_TRUE(a->getOption(DHO_DOMAIN_NAME_SERVERS));
// Check that something is offered
EXPECT_TRUE(a->getYiaddr().toText() != "0.0.0.0");
}
+ /// @brief Check that requested options are present.
+ ///
+ /// @param pkt packet to be checked.
+ void optionsCheck(const Pkt4Ptr& pkt) {
+ // Check that the requested and configured options are returned
+ // in the ACK message.
+ EXPECT_TRUE(pkt->getOption(DHO_DOMAIN_NAME))
+ << "domain-name not present in the response";
+ EXPECT_TRUE(pkt->getOption(DHO_DOMAIN_NAME_SERVERS))
+ << "dns-servers not present in the response";
+ EXPECT_TRUE(pkt->getOption(DHO_LOG_SERVERS))
+ << "log-servers not present in the response";
+ EXPECT_TRUE(pkt->getOption(DHO_COOKIE_SERVERS))
+ << "cookie-servers not present in the response";
+ // Check that the requested but not configured options are not
+ // returned in the ACK message.
+ EXPECT_FALSE(pkt->getOption(DHO_LPR_SERVERS))
+ << "domain-name present in the response but it is"
+ << " expected not to be present";
+ }
+
/// @brief generates client-id option
///
/// Generate client-id option of specified length
@@ -143,11 +234,13 @@ public:
/// Check that address was returned from proper range, that its lease
/// lifetime is correct, that T1 and T2 are returned properly
/// @param rsp response to be checked
- /// @param subnet subnet that should be used to verify assigned address and options
+ /// @param subnet subnet that should be used to verify assigned address
+ /// and options
/// @param t1_mandatory is T1 mandatory?
/// @param t2_mandatory is T2 mandatory?
void checkAddressParams(const Pkt4Ptr& rsp, const SubnetPtr subnet,
- bool t1_mandatory = false, bool t2_mandatory = false) {
+ bool t1_mandatory = false,
+ bool t2_mandatory = false) {
// Technically inPool implies inRange, but let's be on the safe
// side and check both.
@@ -177,7 +270,7 @@ public:
if (opt) {
EXPECT_EQ(opt->getUint32(), subnet->getT2());
} else {
- if (t1_mandatory) {
+ if (t2_mandatory) {
ADD_FAILURE() << "Required T2 option missing";
}
}
@@ -324,6 +417,12 @@ TEST_F(Dhcpv4SrvTest, processDiscover) {
pkt->setHops(3);
pkt->setRemotePort(DHCP4_SERVER_PORT);
+ // We are going to test that certain options are returned
+ // (or not returned) in the OFFER message when requested
+ // using 'Parameter Request List' option. Let's configure
+ // those options that are returned when requested.
+ configureRequestedOptions();
+
// Should not throw
EXPECT_NO_THROW(
offer = srv->processDiscover(pkt);
@@ -337,7 +436,39 @@ TEST_F(Dhcpv4SrvTest, processDiscover) {
// This is relayed message. It should be sent back to relay address.
EXPECT_EQ(pkt->getGiaddr(), offer->getRemoteAddr());
- MessageCheck(pkt, offer);
+ messageCheck(pkt, offer);
+
+ // There are some options that are always present in the
+ // message, even if not requested.
+ EXPECT_TRUE(offer->getOption(DHO_DOMAIN_NAME));
+ EXPECT_TRUE(offer->getOption(DHO_DOMAIN_NAME_SERVERS));
+
+ // We did not request any options so they should not be present
+ // in the OFFER.
+ EXPECT_FALSE(offer->getOption(DHO_LOG_SERVERS));
+ EXPECT_FALSE(offer->getOption(DHO_COOKIE_SERVERS));
+ EXPECT_FALSE(offer->getOption(DHO_LPR_SERVERS));
+
+ // Add 'Parameter Request List' option.
+ addPrlOption(pkt);
+
+ // Now repeat the test but request some options.
+ EXPECT_NO_THROW(
+ offer = srv->processDiscover(pkt);
+ );
+
+ // Should return something
+ ASSERT_TRUE(offer);
+
+ EXPECT_EQ(DHCPOFFER, offer->getType());
+
+ // This is relayed message. It should be sent back to relay address.
+ EXPECT_EQ(pkt->getGiaddr(), offer->getRemoteAddr());
+
+ messageCheck(pkt, offer);
+
+ // Check that the requested options are returned.
+ optionsCheck(offer);
// Now repeat the test for directly sent message
pkt->setHops(0);
@@ -357,7 +488,10 @@ TEST_F(Dhcpv4SrvTest, processDiscover) {
// to relay.
EXPECT_EQ(pkt->getRemoteAddr(), offer->getRemoteAddr());
- MessageCheck(pkt, offer);
+ messageCheck(pkt, offer);
+
+ // Check that the requested options are returned.
+ optionsCheck(offer);
delete srv;
}
@@ -385,6 +519,12 @@ TEST_F(Dhcpv4SrvTest, processRequest) {
req->setRemoteAddr(IOAddress("192.0.2.56"));
req->setGiaddr(IOAddress("192.0.2.67"));
+ // We are going to test that certain options are returned
+ // in the ACK message when requested using 'Parameter
+ // Request List' option. Let's configure those options that
+ // are returned when requested.
+ configureRequestedOptions();
+
// Should not throw
ASSERT_NO_THROW(
ack = srv->processRequest(req);
@@ -398,7 +538,37 @@ TEST_F(Dhcpv4SrvTest, processRequest) {
// This is relayed message. It should be sent back to relay address.
EXPECT_EQ(req->getGiaddr(), ack->getRemoteAddr());
- MessageCheck(req, ack);
+ messageCheck(req, ack);
+
+ // There are some options that are always present in the
+ // message, even if not requested.
+ EXPECT_TRUE(ack->getOption(DHO_DOMAIN_NAME));
+ EXPECT_TRUE(ack->getOption(DHO_DOMAIN_NAME_SERVERS));
+
+ // We did not request any options so these should not be present
+ // in the ACK.
+ EXPECT_FALSE(ack->getOption(DHO_LOG_SERVERS));
+ EXPECT_FALSE(ack->getOption(DHO_COOKIE_SERVERS));
+ EXPECT_FALSE(ack->getOption(DHO_LPR_SERVERS));
+
+ // Add 'Parameter Request List' option.
+ addPrlOption(req);
+
+ // Repeat the test but request some options.
+ ASSERT_NO_THROW(
+ ack = srv->processRequest(req);
+ );
+
+ // Should return something
+ ASSERT_TRUE(ack);
+
+ EXPECT_EQ(DHCPACK, ack->getType());
+
+ // This is relayed message. It should be sent back to relay address.
+ EXPECT_EQ(req->getGiaddr(), ack->getRemoteAddr());
+
+ // Check that the requested options are returned.
+ optionsCheck(ack);
// Now repeat the test for directly sent message
req->setHops(0);
@@ -418,7 +588,10 @@ TEST_F(Dhcpv4SrvTest, processRequest) {
// to relay.
EXPECT_EQ(ack->getRemoteAddr(), req->getRemoteAddr());
- MessageCheck(req, ack);
+ messageCheck(req, ack);
+
+ // Check that the requested options are returned.
+ optionsCheck(ack);
delete srv;
}
@@ -890,7 +1063,6 @@ TEST_F(Dhcpv4SrvTest, RenewBasic) {
// let's create a lease and put it in the LeaseMgr
uint8_t hwaddr2[] = { 0, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe};
- uint8_t clientid2[] = { 8, 7, 6, 5, 4, 3, 2, 1 };
Lease4Ptr used(new Lease4(IOAddress("192.0.2.106"), hwaddr2, sizeof(hwaddr2),
&client_id_->getDuid()[0], client_id_->getDuid().size(),
temp_valid, temp_t1, temp_t2, temp_timestamp,
diff --git a/src/bin/dhcp4/tests/dhcp4_test.py b/src/bin/dhcp4/tests/dhcp4_test.py
index e493e04..276456e 100644
--- a/src/bin/dhcp4/tests/dhcp4_test.py
+++ b/src/bin/dhcp4/tests/dhcp4_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from init import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index decd986..3b07510 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -6,6 +6,10 @@ AM_CPPFLAGS += -I$(top_srcdir)/src/lib/cc -I$(top_builddir)/src/lib/cc
AM_CPPFLAGS += $(BOOST_INCLUDES)
AM_CXXFLAGS = $(B10_CXXFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some Boost headers when compiling with clang
+AM_CXXFLAGS += -Wno-unused-parameter
+endif
if USE_STATIC_LINK
AM_LDFLAGS = -static
@@ -53,12 +57,6 @@ b10_dhcp6_SOURCES += dhcp6_srv.cc dhcp6_srv.h
nodist_b10_dhcp6_SOURCES = dhcp6_messages.h dhcp6_messages.cc
EXTRA_DIST += dhcp6_messages.mes
-if USE_CLANGPP
-# Disable unused parameter warning caused by some of the
-# Boost headers when compiling with clang.
-b10_dhcp6_CXXFLAGS = -Wno-unused-parameter
-endif
-
b10_dhcp6_LDADD = $(top_builddir)/src/lib/asiolink/libb10-asiolink.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libb10-cc.la
b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libb10-cfgclient.la
diff --git a/src/bin/dhcp6/config_parser.cc b/src/bin/dhcp6/config_parser.cc
index be67f69..76ed228 100644
--- a/src/bin/dhcp6/config_parser.cc
+++ b/src/bin/dhcp6/config_parser.cc
@@ -102,7 +102,6 @@ OptionStorage option_defaults;
/// @brief Global storage for option definitions.
OptionDefStorage option_def_intermediate;
-
/// @brief a dummy configuration parser
///
/// This is a debugging parser. It does not configure anything,
@@ -717,7 +716,7 @@ public:
virtual void commit() {
if (options_ == NULL) {
isc_throw(isc::InvalidOperation, "parser logic error: storage must be set before "
- "commiting option data.");
+ "committing option data.");
} else if (!option_descriptor_.option) {
// Before we can commit the new option should be configured. If it is not
// than somebody must have called commit() before build().
@@ -777,27 +776,31 @@ private:
// does not exceed range of uint16_t and is not zero.
uint32_t option_code = getParam<uint32_t>("code", uint32_values_);
if (option_code == 0) {
- isc_throw(DhcpConfigError, "Parser error: value of 'code' must not"
- << " be equal to zero. Option code '0' is reserved in"
- << " DHCPv6.");
+ isc_throw(DhcpConfigError, "option code must not be zero."
+ << " Option code '0' is reserved in DHCPv6.");
} else if (option_code > std::numeric_limits<uint16_t>::max()) {
- isc_throw(DhcpConfigError, "Parser error: value of 'code' must not"
- << " exceed " << std::numeric_limits<uint16_t>::max());
+ isc_throw(DhcpConfigError, "invalid option code '" << option_code
+ << "', it must not exceed '"
+ << std::numeric_limits<uint16_t>::max() << "'");
}
// Check that the option name has been specified, is non-empty and does not
// contain spaces.
- // @todo possibly some more restrictions apply here?
std::string option_name = getParam<std::string>("name", string_values_);
if (option_name.empty()) {
- isc_throw(DhcpConfigError, "Parser error: option name must not be"
- << " empty");
+ isc_throw(DhcpConfigError, "name of the option with code '"
+ << option_code << "' is empty");
} else if (option_name.find(" ") != std::string::npos) {
- isc_throw(DhcpConfigError, "Parser error: option name must not contain"
- << " spaces");
+ isc_throw(DhcpConfigError, "invalid option name '" << option_name
+ << "', space character is not allowed");
}
std::string option_space = getParam<std::string>("space", string_values_);
- /// @todo Validate option space once #2313 is merged.
+ if (!OptionSpace::validateName(option_space)) {
+ isc_throw(DhcpConfigError, "invalid option space name '"
+ << option_space << "' specified for option '"
+ << option_name << "' (code '" << option_code
+ << "')");
+ }
OptionDefinitionPtr def;
if (option_space == "dhcp6" &&
@@ -886,7 +889,7 @@ private:
// definition of option value makes sense.
if (def->getName() != option_name) {
isc_throw(DhcpConfigError, "specified option name '"
- << option_name << " does not match the "
+ << option_name << "' does not match the "
<< "option definition: '" << option_space
<< "." << def->getName() << "'");
}
@@ -993,13 +996,13 @@ public:
return (new OptionDataListParser(param_name));
}
+ /// Pointer to options instances storage.
+ OptionStorage* options_;
/// Intermediate option storage. This storage is used by
/// lower level parsers to add new options. Values held
/// in this storage are assigned to main storage (options_)
/// if overall parsing was successful.
OptionStorage local_options_;
- /// Pointer to options instances storage.
- OptionStorage* options_;
/// Collection of parsers;
ParserCollection parsers_;
};
@@ -1033,8 +1036,8 @@ public:
BOOST_FOREACH(ConfigPair param, option_def->mapValue()) {
std::string entry(param.first);
ParserPtr parser;
- if (entry == "name" || entry == "type" ||
- entry == "record-types" || entry == "space") {
+ if (entry == "name" || entry == "type" || entry == "record-types" ||
+ entry == "space" || entry == "encapsulate") {
StringParserPtr
str_parser(dynamic_cast<StringParser*>(StringParser::factory(entry)));
if (str_parser) {
@@ -1084,8 +1087,8 @@ public:
/// @brief Stores the parsed option definition in the data store.
void commit() {
- // @todo validate option space name once 2313 is merged.
- if (storage_ && option_definition_) {
+ if (storage_ && option_definition_ &&
+ OptionSpace::validateName(option_space_name_)) {
storage_->addItem(option_definition_, option_space_name_);
}
}
@@ -1107,11 +1110,10 @@ private:
void createOptionDef() {
// Get the option space name and validate it.
std::string space = getParam<std::string>("space", string_values_);
- // @todo uncomment the code below when the #2313 is merged.
- /* if (!OptionSpace::validateName()) {
+ if (!OptionSpace::validateName(space)) {
isc_throw(DhcpConfigError, "invalid option space name '"
<< space << "'");
- } */
+ }
// Get other parameters that are needed to create the
// option definition.
@@ -1119,9 +1121,36 @@ private:
uint32_t code = getParam<uint32_t>("code", uint32_values_);
std::string type = getParam<std::string>("type", string_values_);
bool array_type = getParam<bool>("array", boolean_values_);
+ std::string encapsulates = getParam<std::string>("encapsulate",
+ string_values_);
+
+ // Create option definition.
+ OptionDefinitionPtr def;
+ // We need to check if the user has set the encapsulated option space
+ // name. If so, a different constructor will be used.
+ if (!encapsulates.empty()) {
+ // Arrays can't be used together with sub-options.
+ if (array_type) {
+ isc_throw(DhcpConfigError, "option '" << space << "."
+ << "name" << "', comprising an array of data"
+ << " fields may not encapsulate any option space");
+
+ } else if (encapsulates == space) {
+ isc_throw(DhcpConfigError, "option must not encapsulate"
+ << " an option space it belongs to: '"
+ << space << "." << name << "' is set to"
+ << " encapsulate '" << space << "'");
+
+ } else {
+ def.reset(new OptionDefinition(name, code, type,
+ encapsulates.c_str()));
+ }
+
+ } else {
+ def.reset(new OptionDefinition(name, code, type, array_type));
+
+ }
- OptionDefinitionPtr def(new OptionDefinition(name, code,
- type, array_type));
// The record-types field may carry a list of comma separated names
// of data types that form a record.
std::string record_types = getParam<std::string>("record-types",
@@ -1139,7 +1168,7 @@ private:
}
} catch (const Exception& ex) {
isc_throw(DhcpConfigError, "invalid record type values"
- << " specified for the option definition: "
+ << " specified for the option definition: "
<< ex.what());
}
}
@@ -1358,6 +1387,63 @@ private:
return (false);
}
+ /// @brief Append sub-options to an option.
+ ///
+ /// @param option_space a name of the encapsulated option space.
+ /// @param option option instance to append sub-options to.
+ void appendSubOptions(const std::string& option_space, OptionPtr& option) {
+ // Only non-NULL options are stored in option container.
+ // If this option pointer is NULL this is a serious error.
+ assert(option);
+
+ OptionDefinitionPtr def;
+ if (option_space == "dhcp6" &&
+ LibDHCP::isStandardOption(Option::V6, option->getType())) {
+ def = LibDHCP::getOptionDef(Option::V6, option->getType());
+ // Definitions for some of the standard options haven't been
+ // implemented yet, so it is ok to simply return here.
+ if (!def) {
+ return;
+ }
+ } else {
+ const OptionDefContainerPtr defs =
+ option_def_intermediate.getItems(option_space);
+ const OptionDefContainerTypeIndex& idx = defs->get<1>();
+ const OptionDefContainerTypeRange& range =
+ idx.equal_range(option->getType());
+ // There is no definition so we have to leave.
+ if (std::distance(range.first, range.second) == 0) {
+ return;
+ }
+
+ def = *range.first;
+
+ // If the definition exists, it must be non-NULL.
+ // Otherwise it is a programming error.
+ assert(def);
+ }
+
+ // The option definition for the particular option space and code
+ // holds the information about whether our option encapsulates
+ // any option space.
+ // Get the encapsulated option space name.
+ std::string encapsulated_space = def->getEncapsulatedSpace();
+ // If option space name is empty it means that our option does not
+ // encapsulate any option space (does not include sub-options).
+ if (!encapsulated_space.empty()) {
+ // Get the sub-options that belong to the encapsulated
+ // option space.
+ const Subnet::OptionContainerPtr sub_opts =
+ option_defaults.getItems(encapsulated_space);
+ // Append sub-options to the option.
+ BOOST_FOREACH(Subnet::OptionDescriptor desc, *sub_opts) {
+ if (desc.option) {
+ option->addOption(desc.option);
+ }
+ }
+ }
+ }
+
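A rough standalone illustration of what appendSubOptions() does (simplified stand-in types, not the parser code): the parent option's definition names an encapsulated option space, and every option configured in that space is attached to the parent as a sub-option.

    // Sketch only: attach all options configured in the encapsulated space
    // to the parent option, as appendSubOptions() does via option->addOption().
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct SimpleOption {
        uint16_t code;
        std::vector<std::shared_ptr<SimpleOption> > sub_options;
    };

    int main() {
        // Hypothetical storage: option space name -> options configured in it.
        std::map<std::string, std::vector<std::shared_ptr<SimpleOption> > > spaces;
        spaces["isc"].push_back(std::make_shared<SimpleOption>(SimpleOption{110, {}}));
        spaces["isc"].push_back(std::make_shared<SimpleOption>(SimpleOption{111, {}}));

        // Parent option whose definition encapsulates the "isc" space.
        SimpleOption base{100, {}};
        const std::string encapsulated_space = "isc";

        if (!encapsulated_space.empty()) {
            for (const auto& sub : spaces[encapsulated_space]) {
                base.sub_options.push_back(sub);   // option->addOption(desc.option)
            }
        }

        std::cout << "option " << base.code << " carries "
                  << base.sub_options.size() << " sub-options" << std::endl;
        return (0);
    }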
/// @brief Create a new subnet using a data from child parsers.
///
/// @throw isc::dhcp::DhcpConfigError if subnet configuration parsing failed.
@@ -1458,6 +1544,8 @@ private:
LOG_WARN(dhcp6_logger, DHCP6_CONFIG_OPTION_DUPLICATE)
.arg(desc.option->getType()).arg(addr.toText());
}
+ // Add sub-options (if any).
+ appendSubOptions(option_space, desc.option);
// In any case, we add the option to the subnet.
subnet_->addOption(desc.option, false, option_space);
}
@@ -1485,6 +1573,9 @@ private:
Subnet::OptionDescriptor existing_desc =
subnet_->getOptionDescriptor(option_space, desc.option->getType());
if (!existing_desc.option) {
+ // Add sub-options (if any).
+ appendSubOptions(option_space, desc.option);
+
subnet_->addOption(desc.option, false, option_space);
}
}
@@ -1817,7 +1908,7 @@ configureDhcp6Server(Dhcpv6Srv&, ConstElementPtr config_set) {
LOG_INFO(dhcp6_logger, DHCP6_CONFIG_COMPLETE).arg(config_details);
// Everything was fine. Configuration is successful.
- answer = isc::config::createAnswer(0, "Configuration commited.");
+ answer = isc::config::createAnswer(0, "Configuration committed.");
return (answer);
}
diff --git a/src/bin/dhcp6/ctrl_dhcp6_srv.cc b/src/bin/dhcp6/ctrl_dhcp6_srv.cc
index a1fe4e5..e4e17f1 100644
--- a/src/bin/dhcp6/ctrl_dhcp6_srv.cc
+++ b/src/bin/dhcp6/ctrl_dhcp6_srv.cc
@@ -45,19 +45,62 @@ namespace dhcp {
ControlledDhcpv6Srv* ControlledDhcpv6Srv::server_ = NULL;
ConstElementPtr
+ControlledDhcpv6Srv::dhcp6StubConfigHandler(ConstElementPtr) {
+ // This configuration handler is intended to be used only
+ // when the initial configuration comes in. To receive this
+ // configuration a pointer to this handler must be passed
+ // using ModuleCCSession constructor. This constructor will
+ // invoke the handler and will store the configuration for
+ // the configuration session when the handler returns success.
+ // Since this configuration is partial we just pretend to
+ // parse it and always return success. The function that
+ // initiates the session must get the configuration on its
+ // own using getFullConfig.
+ return (isc::config::createAnswer(0, "Configuration accepted."));
+}
+
+ConstElementPtr
ControlledDhcpv6Srv::dhcp6ConfigHandler(ConstElementPtr new_config) {
- LOG_DEBUG(dhcp6_logger, DBG_DHCP6_COMMAND, DHCP6_CONFIG_UPDATE)
- .arg(new_config->str());
- if (server_) {
- return (configureDhcp6Server(*server_, new_config));
+ if (!server_ || !server_->config_session_) {
+ // That should never happen as we install config_handler
+ // after we instantiate the server.
+ ConstElementPtr answer =
+ isc::config::createAnswer(1, "Configuration rejected,"
+ " server is during startup/shutdown phase.");
+ return (answer);
}
- // That should never happen as we install config_handler after we instantiate
- // the server.
- ConstElementPtr answer = isc::config::createAnswer(1,
- "Configuration rejected, server is during startup/shutdown phase.");
- return (answer);
+ // The configuration passed to this handler function is partial.
+ // In other words, it just includes the values being modified.
+ // At the same time, there are dependencies between the various
+ // DHCP configuration parsers. For example: an option value can
+ // only be set if the definition of this option is set. If someone
+ // removes an existing option definition, the partial configuration
+ // that removes that definition is processed while a relevant option
+ // value may remain configured. This eventually leaves the DHCP
+ // server configuration in an inconsistent state.
+ // In order to work around this problem we need to merge the new
+ // configuration with the existing (full) configuration.
+
+ // Let's create a new object that will hold the merged configuration.
+ boost::shared_ptr<MapElement> merged_config(new MapElement());
+ // Let's get the existing configuration.
+ ConstElementPtr full_config = server_->config_session_->getFullConfig();
+ // The full_config and merged_config should be always non-NULL
+ // but to provide some level of exception safety we check that they
+ // really are (in case we go out of memory).
+ if (full_config && merged_config) {
+ merged_config->setValue(full_config->mapValue());
+
+ // Merge an existing and new configuration.
+ isc::data::merge(merged_config, new_config);
+ LOG_DEBUG(dhcp6_logger, DBG_DHCP6_COMMAND, DHCP6_CONFIG_UPDATE)
+ .arg(merged_config->str());
+ }
+
+ // Configure the server.
+ return (configureDhcp6Server(*server_, merged_config));
}
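The merge is the essential step in the handler above: the handler receives only the modified values, so they are overlaid onto the previously committed full configuration before parsing. A standalone sketch of that overlay, using plain std::map instead of isc::data::Element:

    // Sketch only: keys present in the partial update override the full
    // configuration, everything else is preserved (analogous to the
    // isc::data::merge() call in dhcp6ConfigHandler()).
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
        // Full configuration committed in the previous session.
        std::map<std::string, std::string> full;
        full["renew-timer"] = "1000";
        full["rebind-timer"] = "2000";
        full["valid-lifetime"] = "4000";

        // Partial update from the config manager: only what changed.
        std::map<std::string, std::string> partial;
        partial["rebind-timer"] = "3000";

        // Overlay the partial configuration onto the full one.
        std::map<std::string, std::string> merged = full;
        for (std::map<std::string, std::string>::const_iterator it = partial.begin();
             it != partial.end(); ++it) {
            merged[it->first] = it->second;
        }

        for (std::map<std::string, std::string>::const_iterator it = merged.begin();
             it != merged.end(); ++it) {
            std::cout << it->first << " = " << it->second << std::endl;
        }
        return (0);
    }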
ConstElementPtr
@@ -108,18 +151,26 @@ void ControlledDhcpv6Srv::establishSession() {
LOG_DEBUG(dhcp6_logger, DBG_DHCP6_START, DHCP6_CCSESSION_STARTING)
.arg(specfile);
cc_session_ = new Session(io_service_.get_io_service());
+ // Create a session with the dummy configuration handler.
+ // The dummy configuration handler is internally invoked by the
+ // constructor and on success the constructor updates
+ // the current session with the configuration that had been
+ // committed in the previous session. If we did not install
+ // the dummy handler, the previous configuration would have
+ // been lost.
config_session_ = new ModuleCCSession(specfile, *cc_session_,
- NULL,
+ dhcp6StubConfigHandler,
dhcp6CommandHandler, false);
config_session_->start();
- // We initially create ModuleCCSession() without configHandler, as
- // the session module is too eager to send partial configuration.
- // We want to get the full configuration, so we explicitly call
- // getFullConfig() and then pass it to our configHandler.
+ // The constructor already pulled the configuration that had
+ // been created in the previous session thanks to the dummy
+ // handler. We can switch to the handler that will be
+ // parsing future changes to the configuration.
config_session_->setConfigHandler(dhcp6ConfigHandler);
try {
+ // Pull the full configuration out from the session.
configureDhcp6Server(*this, config_session_->getFullConfig());
} catch (const DhcpConfigError& ex) {
LOG_ERROR(dhcp6_logger, DHCP6_CONFIG_LOAD_FAIL).arg(ex.what());
diff --git a/src/bin/dhcp6/ctrl_dhcp6_srv.h b/src/bin/dhcp6/ctrl_dhcp6_srv.h
index ef8f085..0e699ce 100644
--- a/src/bin/dhcp6/ctrl_dhcp6_srv.h
+++ b/src/bin/dhcp6/ctrl_dhcp6_srv.h
@@ -49,7 +49,7 @@ public:
/// @brief Establishes msgq session.
///
/// Creates session that will be used to receive commands and updated
- /// configuration from boss (or indirectly from user via bindctl).
+ /// configuration from cfgmgr (or indirectly from user via bindctl).
void establishSession();
/// @brief Terminates existing msgq session.
@@ -92,6 +92,27 @@ protected:
static isc::data::ConstElementPtr
dhcp6ConfigHandler(isc::data::ConstElementPtr new_config);
+ /// @brief A dummy configuration handler that always returns success.
+ ///
+ /// This configuration handler does not perform configuration
+ /// parsing and always returns success. A dummy hanlder should
+ /// be installed using \ref isc::config::ModuleCCSession ctor
+ /// to get the initial configuration. This initial configuration
+ /// comprises values for only those elements that were modified
+ /// the previous session. The \ref dhcp6ConfigHandler can't be
+ /// used to parse the initial configuration because it needs the
+ /// full configuration to satisfy dependencies between the
+ /// various configuration values. Installing the dummy handler
+ /// that guarantees to return success causes initial configuration
+ /// to be stored for the session being created and that it can
+ /// be later accessed with \ref isc::ConfigData::getFullConfig.
+ ///
+ /// @param new_config new configuration.
+ ///
+ /// @return success configuration status.
+ static isc::data::ConstElementPtr
+ dhcp6StubConfigHandler(isc::data::ConstElementPtr new_config);
+
/// @brief A callback for handling incoming commands.
///
/// @param command textual representation of the command
diff --git a/src/bin/dhcp6/dhcp6.spec b/src/bin/dhcp6/dhcp6.spec
index 2799f06..1129aec 100644
--- a/src/bin/dhcp6/dhcp6.spec
+++ b/src/bin/dhcp6/dhcp6.spec
@@ -76,7 +76,7 @@
"item_default": False
},
- { "item_name": "record_types",
+ { "item_name": "record-types",
"item_type": "string",
"item_optional": false,
"item_default": ""
@@ -86,6 +86,12 @@
"item_type": "string",
"item_optional": false,
"item_default": ""
+ },
+
+ { "item_name": "encapsulate",
+ "item_type": "string",
+ "item_optional": false,
+ "item_default": ""
} ]
}
},
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index 1e0b4da..851f405 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -56,6 +56,16 @@ using namespace std;
namespace isc {
namespace dhcp {
+/// @brief file name of a server-id file
+///
+/// The server must store its DUID in persistent storage that does not change
+/// between restarts. This is the name of the file that is created in dataDir
+/// (see isc::dhcp::CfgMgr::getDataDir()). It is a text file that uses
+/// double-digit hex values separated by colons, e.g.
+/// 01:ff:02:03:06:80:90:ab:cd:ef. The server will create it during the first
+/// run and then use it afterwards.
+static const char* SERVER_DUID_FILE = "b10-dhcp6-serverid";
+
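The text format described above can be produced with standard C++ alone; the snippet below is only a sketch of the formatting rule, not the actual duidToString() implementation:

    // Sketch only: render a DUID as two-digit hex values separated by colons,
    // e.g. 01:ff:02:03:06:80:90:ab:cd:ef.
    #include <cstdint>
    #include <iomanip>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    std::string duidToText(const std::vector<uint8_t>& duid) {
        std::ostringstream out;
        for (std::vector<uint8_t>::size_type i = 0; i < duid.size(); ++i) {
            if (i > 0) {
                out << ":";
            }
            out << std::hex << std::setw(2) << std::setfill('0')
                << static_cast<unsigned int>(duid[i]);
        }
        return (out.str());
    }

    int main() {
        const uint8_t bytes[] = {0x01, 0xff, 0x02, 0x03, 0x06,
                                 0x80, 0x90, 0xab, 0xcd, 0xef};
        std::vector<uint8_t> duid(bytes, bytes + sizeof(bytes));
        std::cout << duidToText(duid) << std::endl;  // 01:ff:02:03:06:80:90:ab:cd:ef
        return (0);
    }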
Dhcpv6Srv::Dhcpv6Srv(uint16_t port)
: alloc_engine_(), serverid_(), shutdown_(true) {
@@ -78,6 +88,7 @@ Dhcpv6Srv::Dhcpv6Srv(uint16_t port)
string duid_file = CfgMgr::instance().getDataDir() + "/" + string(SERVER_DUID_FILE);
if (loadServerID(duid_file)) {
LOG_DEBUG(dhcp6_logger, DBG_DHCP6_START, DHCP6_SERVERID_LOADED)
+ .arg(duidToString(getServerID()))
.arg(duid_file);
} else {
generateServerID();
@@ -205,7 +216,7 @@ bool Dhcpv6Srv::run() {
LOG_DEBUG(dhcp6_logger, DBG_DHCP6_DETAIL_DATA,
DHCP6_RESPONSE_DATA)
- .arg(rsp->getType()).arg(rsp->toText());
+ .arg(static_cast<int>(rsp->getType())).arg(rsp->toText());
if (rsp->pack()) {
try {
@@ -255,7 +266,8 @@ bool Dhcpv6Srv::loadServerID(const std::string& file_name) {
return (true);
}
-std::string Dhcpv6Srv::duidToString(const OptionPtr& opt) {
+std::string
+Dhcpv6Srv::duidToString(const OptionPtr& opt) {
stringstream tmp;
OptionBuffer data = opt->getData();
@@ -274,16 +286,19 @@ std::string Dhcpv6Srv::duidToString(const OptionPtr& opt) {
return tmp.str();
}
-bool Dhcpv6Srv::writeServerID(const std::string& file_name) {
+bool
+Dhcpv6Srv::writeServerID(const std::string& file_name) {
fstream f(file_name.c_str(), ios::out | ios::trunc);
if (!f.good()) {
return (false);
}
f << duidToString(getServerID());
f.close();
+ return (true);
}
-void Dhcpv6Srv::generateServerID() {
+void
+Dhcpv6Srv::generateServerID() {
/// @todo: This code implements support for DUID-LLT (the recommended one).
/// We should eventually add support for other DUID types: DUID-LL, DUID-EN
@@ -366,7 +381,8 @@ void Dhcpv6Srv::generateServerID() {
srvid.begin(), srvid.end()));
}
-void Dhcpv6Srv::copyDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
+void
+Dhcpv6Srv::copyDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
// Add client-id.
OptionPtr clientid = question->getOption(D6O_CLIENTID);
if (clientid) {
@@ -376,7 +392,8 @@ void Dhcpv6Srv::copyDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
// TODO: Should throw if there is no client-id (except anonymous INF-REQUEST)
}
-void Dhcpv6Srv::appendDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
+void
+Dhcpv6Srv::appendDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
// add server-id
answer->addOption(getServerID());
@@ -392,7 +409,8 @@ void Dhcpv6Srv::appendDefaultOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
}
-void Dhcpv6Srv::appendRequestedOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
+void
+Dhcpv6Srv::appendRequestedOptions(const Pkt6Ptr& question, Pkt6Ptr& answer) {
// Get the subnet for a particular address.
Subnet6Ptr subnet = CfgMgr::instance().getSubnet6(question->getRemoteAddr());
if (!subnet) {
@@ -411,20 +429,16 @@ void Dhcpv6Srv::appendRequestedOptions(const Pkt6Ptr& question, Pkt6Ptr& answer)
}
// Get the list of options that client requested.
const std::vector<uint16_t>& requested_opts = option_oro->getValues();
- // Get the list of options configured for a subnet.
- Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp6");
- const Subnet::OptionContainerTypeIndex& idx = options->get<1>();
- // Try to match requested options with those configured for a subnet.
- // If match is found, append configured option to the answer message.
BOOST_FOREACH(uint16_t opt, requested_opts) {
- const Subnet::OptionContainerTypeRange& range = idx.equal_range(opt);
- BOOST_FOREACH(Subnet::OptionDescriptor desc, range) {
+ Subnet::OptionDescriptor desc = subnet->getOptionDescriptor("dhcp6", opt);
+ if (desc.option) {
answer->addOption(desc.option);
}
}
}
-OptionPtr Dhcpv6Srv::createStatusCode(uint16_t code, const std::string& text) {
+OptionPtr
+Dhcpv6Srv::createStatusCode(uint16_t code, const std::string& text) {
// @todo This function uses OptionCustom class to manage contents
// of the data fields. Since this option is frequently used
// it may be good to implement dedicated class to avoid performance
@@ -450,8 +464,9 @@ OptionPtr Dhcpv6Srv::createStatusCode(uint16_t code, const std::string& text) {
return (option_status);
}
-void Dhcpv6Srv::sanityCheck(const Pkt6Ptr& pkt, RequirementLevel clientid,
- RequirementLevel serverid) {
+void
+Dhcpv6Srv::sanityCheck(const Pkt6Ptr& pkt, RequirementLevel clientid,
+ RequirementLevel serverid) {
Option::OptionCollection client_ids = pkt->getOptions(D6O_CLIENTID);
switch (clientid) {
case MANDATORY:
@@ -498,7 +513,8 @@ void Dhcpv6Srv::sanityCheck(const Pkt6Ptr& pkt, RequirementLevel clientid,
}
}
-Subnet6Ptr Dhcpv6Srv::selectSubnet(const Pkt6Ptr& question) {
+Subnet6Ptr
+Dhcpv6Srv::selectSubnet(const Pkt6Ptr& question) {
/// @todo: pass interface information only if received direct (non-relayed) message
@@ -514,7 +530,8 @@ Subnet6Ptr Dhcpv6Srv::selectSubnet(const Pkt6Ptr& question) {
return (subnet);
}
-void Dhcpv6Srv::assignLeases(const Pkt6Ptr& question, Pkt6Ptr& answer) {
+void
+Dhcpv6Srv::assignLeases(const Pkt6Ptr& question, Pkt6Ptr& answer) {
// We need to allocate addresses for all IA_NA options in the client's
// question (i.e. SOLICIT or REQUEST) message.
@@ -583,8 +600,9 @@ void Dhcpv6Srv::assignLeases(const Pkt6Ptr& question, Pkt6Ptr& answer) {
}
}
-OptionPtr Dhcpv6Srv::assignIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
- Pkt6Ptr question, boost::shared_ptr<Option6IA> ia) {
+OptionPtr
+Dhcpv6Srv::assignIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
+ Pkt6Ptr question, boost::shared_ptr<Option6IA> ia) {
// If there is no subnet selected for handling this IA_NA, the only thing to do left is
// to say that we are sorry, but the user won't get an address. As a convenience, we
// use a different status text to indicate that (compare to the same status code,
@@ -679,8 +697,9 @@ OptionPtr Dhcpv6Srv::assignIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
return (ia_rsp);
}
-OptionPtr Dhcpv6Srv::renewIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
- Pkt6Ptr question, boost::shared_ptr<Option6IA> ia) {
+OptionPtr
+Dhcpv6Srv::renewIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
+ Pkt6Ptr /* question */, boost::shared_ptr<Option6IA> ia) {
Lease6Ptr lease = LeaseMgrFactory::instance().getLease6(*duid, ia->getIAID(),
subnet->getID());
@@ -723,7 +742,8 @@ OptionPtr Dhcpv6Srv::renewIA_NA(const Subnet6Ptr& subnet, const DuidPtr& duid,
return (ia_rsp);
}
-void Dhcpv6Srv::renewLeases(const Pkt6Ptr& renew, Pkt6Ptr& reply) {
+void
+Dhcpv6Srv::renewLeases(const Pkt6Ptr& renew, Pkt6Ptr& reply) {
// We need to renew addresses for all IA_NA options in the client's
// RENEW message.
@@ -779,7 +799,8 @@ void Dhcpv6Srv::renewLeases(const Pkt6Ptr& renew, Pkt6Ptr& reply) {
}
}
-void Dhcpv6Srv::releaseLeases(const Pkt6Ptr& release, Pkt6Ptr& reply) {
+void
+Dhcpv6Srv::releaseLeases(const Pkt6Ptr& release, Pkt6Ptr& reply) {
// We need to release addresses for all IA_NA options in the client's
// RELEASE message.
@@ -835,9 +856,9 @@ void Dhcpv6Srv::releaseLeases(const Pkt6Ptr& release, Pkt6Ptr& reply) {
"Summary status for all processed IA_NAs"));
}
-OptionPtr Dhcpv6Srv::releaseIA_NA(const DuidPtr& duid, Pkt6Ptr question,
- int& general_status,
- boost::shared_ptr<Option6IA> ia) {
+OptionPtr
+Dhcpv6Srv::releaseIA_NA(const DuidPtr& duid, Pkt6Ptr /* question */,
+ int& general_status, boost::shared_ptr<Option6IA> ia) {
// Release can be done in one of two ways:
// Approach 1: extract address from client's IA_NA and see if it belongs
// to this particular client.
@@ -946,8 +967,8 @@ OptionPtr Dhcpv6Srv::releaseIA_NA(const DuidPtr& duid, Pkt6Ptr question,
}
}
-
-Pkt6Ptr Dhcpv6Srv::processSolicit(const Pkt6Ptr& solicit) {
+Pkt6Ptr
+Dhcpv6Srv::processSolicit(const Pkt6Ptr& solicit) {
sanityCheck(solicit, MANDATORY, FORBIDDEN);
@@ -962,7 +983,8 @@ Pkt6Ptr Dhcpv6Srv::processSolicit(const Pkt6Ptr& solicit) {
return (advertise);
}
-Pkt6Ptr Dhcpv6Srv::processRequest(const Pkt6Ptr& request) {
+Pkt6Ptr
+Dhcpv6Srv::processRequest(const Pkt6Ptr& request) {
sanityCheck(request, MANDATORY, MANDATORY);
@@ -977,7 +999,8 @@ Pkt6Ptr Dhcpv6Srv::processRequest(const Pkt6Ptr& request) {
return (reply);
}
-Pkt6Ptr Dhcpv6Srv::processRenew(const Pkt6Ptr& renew) {
+Pkt6Ptr
+Dhcpv6Srv::processRenew(const Pkt6Ptr& renew) {
sanityCheck(renew, MANDATORY, MANDATORY);
@@ -992,19 +1015,22 @@ Pkt6Ptr Dhcpv6Srv::processRenew(const Pkt6Ptr& renew) {
return reply;
}
-Pkt6Ptr Dhcpv6Srv::processRebind(const Pkt6Ptr& rebind) {
+Pkt6Ptr
+Dhcpv6Srv::processRebind(const Pkt6Ptr& rebind) {
/// @todo: Implement this
Pkt6Ptr reply(new Pkt6(DHCPV6_REPLY, rebind->getTransid()));
return reply;
}
-Pkt6Ptr Dhcpv6Srv::processConfirm(const Pkt6Ptr& confirm) {
+Pkt6Ptr
+Dhcpv6Srv::processConfirm(const Pkt6Ptr& confirm) {
/// @todo: Implement this
Pkt6Ptr reply(new Pkt6(DHCPV6_REPLY, confirm->getTransid()));
return reply;
}
-Pkt6Ptr Dhcpv6Srv::processRelease(const Pkt6Ptr& release) {
+Pkt6Ptr
+Dhcpv6Srv::processRelease(const Pkt6Ptr& release) {
sanityCheck(release, MANDATORY, MANDATORY);
@@ -1018,13 +1044,15 @@ Pkt6Ptr Dhcpv6Srv::processRelease(const Pkt6Ptr& release) {
return reply;
}
-Pkt6Ptr Dhcpv6Srv::processDecline(const Pkt6Ptr& decline) {
+Pkt6Ptr
+Dhcpv6Srv::processDecline(const Pkt6Ptr& decline) {
/// @todo: Implement this
Pkt6Ptr reply(new Pkt6(DHCPV6_REPLY, decline->getTransid()));
return reply;
}
-Pkt6Ptr Dhcpv6Srv::processInfRequest(const Pkt6Ptr& infRequest) {
+Pkt6Ptr
+Dhcpv6Srv::processInfRequest(const Pkt6Ptr& infRequest) {
/// @todo: Implement this
Pkt6Ptr reply(new Pkt6(DHCPV6_REPLY, infRequest->getTransid()));
return reply;
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 7c6f77b..bdcb560 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -31,16 +31,6 @@
namespace isc {
namespace dhcp {
-/// @brief file name of a server-id file
-///
-/// Server must store its duid in persistent storage that must not change
-/// between restarts. This is name of the file that is created in dataDir
-/// (see isc::dhcp::CfgMgr::getDataDir()). It is a text file that uses
-/// double digit hex values separated by colons format, e.g.
-/// 01:ff:02:03:06:80:90:ab:cd:ef. Server will create it during first
-/// run and then use it afterwards.
-static const char* SERVER_DUID_FILE = "b10-dhcp6-serverid";
-
/// @brief DHCPv6 server service.
///
/// This class represents DHCPv6 server. It contains all
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index d251df3..feb4bfa 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -30,6 +30,10 @@ AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
CLEANFILES = $(builddir)/interfaces.txt $(builddir)/logger_lockfile
AM_CXXFLAGS = $(B10_CXXFLAGS)
+if USE_CLANGPP
+# Disable unused parameter warning caused by some Boost headers when compiling with clang
+AM_CXXFLAGS += -Wno-unused-parameter
+endif
if USE_STATIC_LINK
AM_LDFLAGS = -static
@@ -53,12 +57,6 @@ dhcp6_unittests_SOURCES += ../ctrl_dhcp6_srv.cc
dhcp6_unittests_SOURCES += ../config_parser.cc ../config_parser.h
nodist_dhcp6_unittests_SOURCES = ../dhcp6_messages.h ../dhcp6_messages.cc
-if USE_CLANGPP
-# Disable unused parameter warning caused by some of the
-# Boost headers when compiling with clang.
-dhcp6_unittests_CXXFLAGS = -Wno-unused-parameter
-endif
-
dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
dhcp6_unittests_LDADD = $(GTEST_LDADD)
diff --git a/src/bin/dhcp6/tests/config_parser_unittest.cc b/src/bin/dhcp6/tests/config_parser_unittest.cc
index b3fe88e..4430cb2 100644
--- a/src/bin/dhcp6/tests/config_parser_unittest.cc
+++ b/src/bin/dhcp6/tests/config_parser_unittest.cc
@@ -18,6 +18,8 @@
#include <dhcp/libdhcp++.h>
#include <dhcp/option6_ia.h>
#include <dhcp/iface_mgr.h>
+#include <dhcp/option_custom.h>
+#include <dhcp/option_int.h>
#include <dhcp6/config_parser.h>
#include <dhcp6/dhcp6_srv.h>
#include <dhcpsrv/cfgmgr.h>
@@ -275,9 +277,9 @@ public:
expected_data_len));
}
+ int rcode_;
Dhcpv6Srv srv_;
- int rcode_;
ConstElementPtr comment_;
string valid_iface_;
@@ -349,7 +351,6 @@ TEST_F(Dhcp6ParserTest, subnetGlobalDefaults) {
" \"pool\": [ \"2001:db8:1::1 - 2001:db8:1::ffff\" ],"
" \"subnet\": \"2001:db8:1::/64\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -388,7 +389,6 @@ TEST_F(Dhcp6ParserTest, subnetLocal) {
" \"valid-lifetime\": 4,"
" \"subnet\": \"2001:db8:1::/64\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -514,7 +514,7 @@ TEST_F(Dhcp6ParserTest, poolOutOfSubnet) {
" \"pool\": [ \"4001:db8:1::/80\" ],"
" \"subnet\": \"2001:db8:1::/64\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
+
ElementPtr json = Element::fromJSON(config);
@@ -542,7 +542,6 @@ TEST_F(Dhcp6ParserTest, poolPrefixLen) {
" \"pool\": [ \"2001:db8:1::/80\" ],"
" \"subnet\": \"2001:db8:1::/64\" } ],"
"\"valid-lifetime\": 4000 }";
- cout << config << endl;
ElementPtr json = Element::fromJSON(config);
@@ -573,7 +572,8 @@ TEST_F(Dhcp6ParserTest, optionDefIpv6Address) {
" \"type\": \"ipv6-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -611,7 +611,8 @@ TEST_F(Dhcp6ParserTest, optionDefRecord) {
" \"type\": \"record\","
" \"array\": False,"
" \"record-types\": \"uint16, ipv4-address, ipv6-address, string\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -658,7 +659,8 @@ TEST_F(Dhcp6ParserTest, optionDefMultiple) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" },"
" {"
" \"name\": \"foo-2\","
@@ -666,7 +668,8 @@ TEST_F(Dhcp6ParserTest, optionDefMultiple) {
" \"type\": \"ipv4-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -716,7 +719,8 @@ TEST_F(Dhcp6ParserTest, optionDefDuplicate) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" },"
" {"
" \"name\": \"foo-2\","
@@ -724,7 +728,8 @@ TEST_F(Dhcp6ParserTest, optionDefDuplicate) {
" \"type\": \"ipv4-address\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -752,7 +757,8 @@ TEST_F(Dhcp6ParserTest, optionDefArray) {
" \"type\": \"uint32\","
" \"array\": True,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -778,6 +784,47 @@ TEST_F(Dhcp6ParserTest, optionDefArray) {
EXPECT_TRUE(def->getArrayType());
}
+// The purpose of this test is to verify that an encapsulated option
+// space name may be specified.
+TEST_F(Dhcp6ParserTest, optionDefEncapsulate) {
+
+ // Configuration string. It includes the encapsulated
+ // option space name.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"sub-opts-space\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Make sure that the particular option definition does not exist.
+ OptionDefinitionPtr def = CfgMgr::instance().getOptionDef("isc", 100);
+ ASSERT_FALSE(def);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // The option definition should now be available in the CfgMgr.
+ def = CfgMgr::instance().getOptionDef("isc", 100);
+ ASSERT_TRUE(def);
+
+ // Check the option data.
+ EXPECT_EQ("foo", def->getName());
+ EXPECT_EQ(100, def->getCode());
+ EXPECT_EQ(OPT_UINT32_TYPE, def->getType());
+ EXPECT_FALSE(def->getArrayType());
+ EXPECT_EQ("sub-opts-space", def->getEncapsulatedSpace());
+}
+
/// The purpose of this test is to verify that the option definition
/// with invalid name is not accepted.
TEST_F(Dhcp6ParserTest, optionDefInvalidName) {
@@ -790,7 +837,8 @@ TEST_F(Dhcp6ParserTest, optionDefInvalidName) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -815,7 +863,8 @@ TEST_F(Dhcp6ParserTest, optionDefInvalidType) {
" \"type\": \"sting\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -840,7 +889,8 @@ TEST_F(Dhcp6ParserTest, optionDefInvalidRecordType) {
" \"type\": \"record\","
" \"array\": False,"
" \"record-types\": \"uint32,uint8,sting\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -853,6 +903,85 @@ TEST_F(Dhcp6ParserTest, optionDefInvalidRecordType) {
checkResult(status, 1);
}
+/// The goal of this test is to verify that an invalid encapsulated
+/// option space name is not accepted.
+TEST_F(Dhcp6ParserTest, optionDefInvalidEncapsulatedSpace) {
+ // Configuration string. The encapsulated option space
+ // name is invalid (% character is not allowed).
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"invalid%space%name\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
+
+/// The goal of this test is to verify that the encapsulated
+/// option space name can't be specified for the option that
+/// comprises an array of data fields.
+TEST_F(Dhcp6ParserTest, optionDefEncapsulatedSpaceAndArray) {
+ // Configuration string. The encapsulated option space
+ // name is set to non-empty value and the array flag
+ // is set.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": True,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"valid-space-name\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
+
+/// The goal of this test is to verify that the option may not
+/// encapsulate option space it belongs to.
+TEST_F(Dhcp6ParserTest, optionDefEncapsulateOwnSpace) {
+ // Configuration string. The option is set to encapsulate
+ // the option space it belongs to.
+ std::string config =
+ "{ \"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 100,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"isc\""
+ " } ]"
+ "}";
+ ElementPtr json = Element::fromJSON(config);
+
+ // Use the configuration string to create new option definition.
+ ConstElementPtr status;
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ // Expecting parsing error (error code 1).
+ checkResult(status, 1);
+}
/// The purpose of this test is to verify that it is not allowed
/// to override the standard option (that belongs to dhcp6 option
@@ -871,7 +1000,8 @@ TEST_F(Dhcp6ParserTest, optionStandardDefOverride) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"dhcp6\""
+ " \"space\": \"dhcp6\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
ElementPtr json = Element::fromJSON(config);
@@ -906,7 +1036,8 @@ TEST_F(Dhcp6ParserTest, optionStandardDefOverride) {
" \"type\": \"string\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"dhcp6\""
+ " \"space\": \"dhcp6\","
+ " \"encapsulate\": \"\""
" } ]"
"}";
json = Element::fromJSON(config);
@@ -1028,7 +1159,8 @@ TEST_F(Dhcp6ParserTest, optionDataTwoSpaces) {
" \"type\": \"uint32\","
" \"array\": False,"
" \"record-types\": \"\","
- " \"space\": \"isc\""
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ],"
"\"subnet6\": [ { "
" \"pool\": [ \"2001:db8:1::/80\" ],"
@@ -1061,81 +1193,164 @@ TEST_F(Dhcp6ParserTest, optionDataTwoSpaces) {
ASSERT_FALSE(desc3.option);
}
-// The goal of this test is to verify options configuration
-// for a single subnet. In particular this test checks
-// that local options configuration overrides global
-// option setting.
-TEST_F(Dhcp6ParserTest, optionDataInSingleSubnet) {
- ConstElementPtr x;
+// The goal of this test is to verify that it is possible to
+// encapsulate option space containing some options with
+// another option. In this test we create base option that
+// encapsulates option space 'isc' that comprises two other
+// options. Also, for all options their definitions are
+// created.
+TEST_F(Dhcp6ParserTest, optionDataEncapsulate) {
+
+ // @todo DHCP configuration has many dependencies between
+ // parameters. First of all, the configuration for a subnet is
+ // inherited from the global values. Thus the subnet has to be
+ // configured once all global values have been configured.
+ // Also, an option can encapsulate another option only
+ // if the latter has been configured. For this reason this
+ // test uses a two-stage configuration where first we
+ // create the options that belong to the encapsulated option space.
+ // In the second stage we add the base option. Also, the Subnet
+ // object is configured in the second stage so it is created
+ // at the very end (when all other parameters are configured).
+
+ // Starting stage 1. Configure sub-options and their definitions.
string config = "{ \"interface\": [ \"all\" ],"
- "\"preferred-lifetime\": 3000,"
- "\"rebind-timer\": 2000, "
- "\"renew-timer\": 1000, "
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
"\"option-data\": [ {"
- " \"name\": \"subscriber-id\","
- " \"space\": \"dhcp6\","
- " \"code\": 38,"
- " \"data\": \"AB\","
- " \"csv-format\": False"
+ " \"name\": \"foo\","
+ " \"space\": \"isc\","
+ " \"code\": 110,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"isc\","
+ " \"code\": 111,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 110,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 111,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " } ]"
+ "}";
+
+ ConstElementPtr status;
+
+ ElementPtr json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Stage 2. Configure the base option and a subnet. Please note that
+ // the configuration from stage 1 is repeated because the BIND
+ // configuration manager sends the whole configuration for lists
+ // where at least one element is being modified or added.
+ config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"base-option\","
+ " \"space\": \"dhcp6\","
+ " \"code\": 100,"
+ " \"data\": \"11\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo\","
+ " \"space\": \"isc\","
+ " \"code\": 110,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"isc\","
+ " \"code\": 111,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"base-option\","
+ " \"code\": 100,"
+ " \"type\": \"uint8\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"dhcp6\","
+ " \"encapsulate\": \"isc\""
+ "},"
+ "{"
+ " \"name\": \"foo\","
+ " \"code\": 110,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 111,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"isc\","
+ " \"encapsulate\": \"\""
" } ],"
"\"subnet6\": [ { "
" \"pool\": [ \"2001:db8:1::/80\" ],"
- " \"subnet\": \"2001:db8:1::/64\", "
- " \"option-data\": [ {"
- " \"name\": \"subscriber-id\","
- " \"space\": \"dhcp6\","
- " \"code\": 38,"
- " \"data\": \"AB CDEF0105\","
- " \"csv-format\": False"
- " },"
- " {"
- " \"name\": \"preference\","
- " \"space\": \"dhcp6\","
- " \"code\": 7,"
- " \"data\": \"01\","
- " \"csv-format\": False"
- " } ]"
- " } ],"
- "\"valid-lifetime\": 4000 }";
+ " \"subnet\": \"2001:db8:1::/64\""
+ " } ]"
+ "}";
- ElementPtr json = Element::fromJSON(config);
- EXPECT_NO_THROW(x = configureDhcp6Server(srv_, json));
- ASSERT_TRUE(x);
- comment_ = parseAnswer(rcode_, x);
- ASSERT_EQ(0, rcode_);
+ json = Element::fromJSON(config);
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Get the subnet.
Subnet6Ptr subnet = CfgMgr::instance().getSubnet6(IOAddress("2001:db8:1::5"));
ASSERT_TRUE(subnet);
- Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp6");
- ASSERT_EQ(2, options->size());
- // Get the search index. Index #1 is to search using option code.
- const Subnet::OptionContainerTypeIndex& idx = options->get<1>();
-
- // Get the options for specified index. Expecting one option to be
- // returned but in theory we may have multiple options with the same
- // code so we get the range.
- std::pair<Subnet::OptionContainerTypeIndex::const_iterator,
- Subnet::OptionContainerTypeIndex::const_iterator> range =
- idx.equal_range(D6O_SUBSCRIBER_ID);
- // Expect single option with the code equal to 38.
- ASSERT_EQ(1, std::distance(range.first, range.second));
- const uint8_t subid_expected[] = {
- 0xAB, 0xCD, 0xEF, 0x01, 0x05
- };
- // Check if option is valid in terms of code and carried data.
- testOption(*range.first, D6O_SUBSCRIBER_ID, subid_expected,
- sizeof(subid_expected));
+ // We should have one option available.
+ Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp6");
+ ASSERT_TRUE(options);
+ ASSERT_EQ(1, options->size());
- range = idx.equal_range(D6O_PREFERENCE);
- ASSERT_EQ(1, std::distance(range.first, range.second));
- // Do another round of testing with second option.
- const uint8_t pref_expected[] = {
- 0x01
- };
- testOption(*range.first, D6O_PREFERENCE, pref_expected,
- sizeof(pref_expected));
+ // Get the option.
+ Subnet::OptionDescriptor desc = subnet->getOptionDescriptor("dhcp6", 100);
+ EXPECT_TRUE(desc.option);
+ EXPECT_EQ(100, desc.option->getType());
+
+ // This option should comprise two sub-options.
+ // One of them is 'foo' with code 110.
+ OptionPtr option_foo = desc.option->getOption(110);
+ ASSERT_TRUE(option_foo);
+ EXPECT_EQ(110, option_foo->getType());
+
+ // ...another one 'foo2' with code 111.
+ OptionPtr option_foo2 = desc.option->getOption(111);
+ ASSERT_TRUE(option_foo2);
+ EXPECT_EQ(111, option_foo2->getType());
}
// Goal of this test is to verify options configuration
@@ -1377,4 +1592,164 @@ TEST_F(Dhcp6ParserTest, stdOptionData) {
EXPECT_EQ(1516, optionIA->getT2());
}
+// The goal of this test is to verify that the standard option can
+// be configured to encapsulate multiple other options.
+TEST_F(Dhcp6ParserTest, stdOptionDataEncapsulate) {
+
+ // The configuration is a two-stage process in this test.
+ // In the first stage we create definitions of the sub-options
+ // that we will add to the base option.
+ // Let's create some dummy options: foo and foo2.
+ string config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"foo\","
+ " \"space\": \"vendor-opts-space\","
+ " \"code\": 110,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"vendor-opts-space\","
+ " \"code\": 111,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 110,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-opts-space\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 111,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-opts-space\","
+ " \"encapsulate\": \"\""
+ " } ]"
+ "}";
+
+ ConstElementPtr status;
+
+ ElementPtr json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Once the definitions have been added we can configure the
+ // standard option #17. This option comprises an enterprise
+ // number and sub options. By convention (introduced in
+ // std_option_defs.h) option named 'vendor-opts'
+ // encapsulates the option space named 'vendor-opts-space'.
+ // We add our dummy options to this option space and thus
+ // they should be included as sub-options in the 'vendor-opts'
+ // option.
+ config = "{ \"interface\": [ \"all\" ],"
+ "\"rebind-timer\": 2000,"
+ "\"renew-timer\": 1000,"
+ "\"option-data\": [ {"
+ " \"name\": \"vendor-opts\","
+ " \"space\": \"dhcp6\","
+ " \"code\": 17,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo\","
+ " \"space\": \"vendor-opts-space\","
+ " \"code\": 110,"
+ " \"data\": \"1234\","
+ " \"csv-format\": True"
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"space\": \"vendor-opts-space\","
+ " \"code\": 111,"
+ " \"data\": \"192.168.2.1\","
+ " \"csv-format\": True"
+ " } ],"
+ "\"option-def\": [ {"
+ " \"name\": \"foo\","
+ " \"code\": 110,"
+ " \"type\": \"uint32\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-opts-space\","
+ " \"encapsulate\": \"\""
+ " },"
+ " {"
+ " \"name\": \"foo2\","
+ " \"code\": 111,"
+ " \"type\": \"ipv4-address\","
+ " \"array\": False,"
+ " \"record-types\": \"\","
+ " \"space\": \"vendor-opts-space\","
+ " \"encapsulate\": \"\""
+ " } ],"
+ "\"subnet6\": [ { "
+ " \"pool\": [ \"2001:db8:1::/80\" ],"
+ " \"subnet\": \"2001:db8:1::/64\""
+ " } ]"
+ "}";
+
+
+ json = Element::fromJSON(config);
+
+ EXPECT_NO_THROW(status = configureDhcp6Server(srv_, json));
+ ASSERT_TRUE(status);
+ checkResult(status, 0);
+
+ // Get the subnet.
+ Subnet6Ptr subnet = CfgMgr::instance().getSubnet6(IOAddress("2001:db8:1::5"));
+ ASSERT_TRUE(subnet);
+
+ // We should have one option available.
+ Subnet::OptionContainerPtr options = subnet->getOptionDescriptors("dhcp6");
+ ASSERT_TRUE(options);
+ ASSERT_EQ(1, options->size());
+
+ // Get the option.
+ Subnet::OptionDescriptor desc =
+ subnet->getOptionDescriptor("dhcp6", D6O_VENDOR_OPTS);
+ EXPECT_TRUE(desc.option);
+ EXPECT_EQ(D6O_VENDOR_OPTS, desc.option->getType());
+
+ // Option with the code 110 should be added as a sub-option.
+ OptionPtr option_foo = desc.option->getOption(110);
+ ASSERT_TRUE(option_foo);
+ EXPECT_EQ(110, option_foo->getType());
+ // This option comprises a single uint32_t value, thus it is
+ // represented by the OptionInt<uint32_t> class. Let's get the
+ // object of this type.
+ boost::shared_ptr<OptionInt<uint32_t> > option_foo_uint32 =
+ boost::dynamic_pointer_cast<OptionInt<uint32_t> >(option_foo);
+ ASSERT_TRUE(option_foo_uint32);
+ // Validate the value according to the configuration.
+ EXPECT_EQ(1234, option_foo_uint32->getValue());
+
+ // Option with the code 111 should be added as a sub-option.
+ OptionPtr option_foo2 = desc.option->getOption(111);
+ ASSERT_TRUE(option_foo2);
+ EXPECT_EQ(111, option_foo2->getType());
+ // This option comprises an IPv4 address. Such an option is
+ // represented by an OptionCustom object.
+ OptionCustomPtr option_foo2_v4 =
+ boost::dynamic_pointer_cast<OptionCustom>(option_foo2);
+ ASSERT_TRUE(option_foo2_v4);
+ // Get the IP address carried by this option and validate it.
+ EXPECT_EQ("192.168.2.1", option_foo2_v4->readAddress().toText());
+
+ // Option with the code 112 should not be added.
+ EXPECT_FALSE(desc.option->getOption(112));
+}
+
};
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index ef443e7..b742a13 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -222,7 +222,8 @@ public:
// Check that generated IAADDR option contains expected address.
void checkIAAddr(const boost::shared_ptr<Option6IAAddr>& addr,
const IOAddress& expected_addr,
- uint32_t expected_preferred, uint32_t expected_valid) {
+ uint32_t /* expected_preferred */,
+ uint32_t /* expected_valid */) {
// Check that the assigned address is indeed from the configured pool.
// Note that when comparing addresses, we compare the textual
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 1870392..3333111 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from init import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
import unittest
import sys
@@ -157,7 +157,7 @@ class TestDhcpv6Daemon(unittest.TestCase):
def test_alive(self):
"""
- Simple test. Checks that b10-dhcp6 can be started and prints out info
+ Simple test. Checks that b10-dhcp6 can be started and prints out info
about starting DHCPv6 operation.
"""
print("Note: Purpose of some of the tests is to check if DHCPv6 server can be started,")
diff --git a/src/bin/loadzone/loadzone.py.in b/src/bin/loadzone/loadzone.py.in
index 3ed3b7d..736aa31 100755
--- a/src/bin/loadzone/loadzone.py.in
+++ b/src/bin/loadzone/loadzone.py.in
@@ -25,6 +25,7 @@ from isc.datasrc import *
import isc.util.process
import isc.log
from isc.log_messages.loadzone_messages import *
+from datetime import timedelta
isc.util.process.rename()
@@ -87,7 +88,6 @@ class LoadZoneRunner:
'''
def __init__(self, command_args):
self.__command_args = command_args
- self.__loaded_rrs = 0
self.__interrupted = False # will be set to True on receiving signal
# system-wide log configuration. We need to configure logging this
@@ -103,8 +103,9 @@ class LoadZoneRunner:
[{"output": "stderr",
"destination": "console"}]}]}
- # These are essentially private, and defined as "protected" for the
+ # These are essentially private, but defined as "protected" for the
# convenience of tests inspecting them
+ self._loaded_rrs = 0
self._zone_class = None
self._zone_name = None
self._zone_file = None
@@ -113,6 +114,10 @@ class LoadZoneRunner:
self._log_severity = 'INFO'
self._log_debuglevel = 0
self._report_interval = LOAD_INTERVAL_DEFAULT
+ self._start_time = None
+ # This one will be used in (rare) cases where we want to allow tests to
+ # fake time.time()
+ self._get_time = time.time
self._config_log()
@@ -159,7 +164,7 @@ class LoadZoneRunner:
self._zone_class = RRClass(options.zone_class)
except isc.dns.InvalidRRClass as ex:
raise BadArgument('Invalid zone class: ' + str(ex))
- if self._zone_class != RRClass.IN():
+ if self._zone_class != RRClass.IN:
raise BadArgument("RR class is not supported: " +
str(self._zone_class))
@@ -200,16 +205,37 @@ class LoadZoneRunner:
logger.info(LOADZONE_SQLITE3_USING_DEFAULT_CONFIG, default_db_file)
return '{"database_file": "' + default_db_file + '"}'
- def _report_progress(self, loaded_rrs):
+ def _report_progress(self, loaded_rrs, progress, dump=True):
'''Dump the current progress report to stdout.
This is essentially private, but defined as "protected" for tests.
+ Normally dump is True, but tests will set it to False to get the
+ text that would be reported. Tests may also fake self._get_time (which
+ is set to time.time by default) and self._start_time to control
+ time-related conditions.
'''
- elapsed = time.time() - self.__start_time
- sys.stdout.write("\r" + (80 * " "))
- sys.stdout.write("\r%d RRs loaded in %.2f seconds" %
- (loaded_rrs, elapsed))
+ elapsed = self._get_time() - self._start_time
+ speed = int(loaded_rrs / elapsed) if elapsed > 0 else 0
+ etc = None # calculate estimated time of completion
+ if progress != ZoneLoader.PROGRESS_UNKNOWN:
+ etc = (1 - progress) * (elapsed / progress)
+
+ # Build report text
+ report_txt = '\r%d RRs' % loaded_rrs
+ if progress != ZoneLoader.PROGRESS_UNKNOWN:
+ report_txt += ' (%.1f%%)' % (progress * 100)
+ report_txt += ' in %s, %d RRs/sec' % \
+ (str(timedelta(seconds=int(elapsed))), speed)
+ if etc is not None:
+ report_txt += ', %s ETC' % str(timedelta(seconds=int(etc)))
+
+ # Dump or return the report text.
+ if dump:
+ sys.stdout.write("\r" + (80 * " "))
+ sys.stdout.write(report_txt)
+ else:
+ return report_txt
def _do_load(self):
'''Main part of the load logic.
@@ -230,7 +256,7 @@ class LoadZoneRunner:
self._zone_class)
loader = ZoneLoader(datasrc_client, self._zone_name,
self._zone_file)
- self.__start_time = time.time()
+ self._start_time = time.time()
if self._report_interval > 0:
limit = self._report_interval
else:
@@ -239,17 +265,20 @@ class LoadZoneRunner:
limit = LOAD_INTERVAL_DEFAULT
while (not self.__interrupted and
not loader.load_incremental(limit)):
- self.__loaded_rrs += self._report_interval
+ self._loaded_rrs += self._report_interval
if self._report_interval > 0:
- self._report_progress(self.__loaded_rrs)
+ self._report_progress(self._loaded_rrs,
+ loader.get_progress())
if self.__interrupted:
raise LoadFailure('loading interrupted by signal')
# On successful completion, add final '\n' to the progress
# report output (on failure don't bother to make it prettier).
if (self._report_interval > 0 and
- self.__loaded_rrs >= self._report_interval):
+ self._loaded_rrs >= self._report_interval):
sys.stdout.write('\n')
+ # record the final count of the loaded RRs for logging
+ self._loaded_rrs = loader.get_rr_count()
except Exception as ex:
# release any remaining lock held in the loader
loader = None
@@ -273,8 +302,8 @@ class LoadZoneRunner:
self._set_signal_handlers()
self._parse_args()
self._do_load()
- total_elapsed_txt = "%.2f" % (time.time() - self.__start_time)
- logger.info(LOADZONE_DONE, self.__loaded_rrs, self._zone_name,
+ total_elapsed_txt = "%.2f" % (time.time() - self._start_time)
+ logger.info(LOADZONE_DONE, self._loaded_rrs, self._zone_name,
self._zone_class, total_elapsed_txt)
return 0
except BadArgument as ex:
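
The ETC arithmetic introduced in the _report_progress() hunk above is easy to check in isolation: speed is loaded RRs divided by elapsed seconds, and the estimated time of completion is the remaining fraction scaled by the pace so far. The following standalone sketch is illustrative only (format_progress and its arguments are not part of b10-loadzone), but reproduces the same calculation and formatting:

    from datetime import timedelta

    def format_progress(loaded_rrs, elapsed, progress=None):
        # RRs per second; guard against a zero elapsed time
        speed = int(loaded_rrs / elapsed) if elapsed > 0 else 0
        text = '%d RRs' % loaded_rrs
        if progress is not None:
            text += ' (%.1f%%)' % (progress * 100)
        text += ' in %s, %d RRs/sec' % (timedelta(seconds=int(elapsed)), speed)
        if progress:  # remaining fraction scaled by the pace so far
            etc = (1 - progress) * (elapsed / progress)
            text += ', %s ETC' % timedelta(seconds=int(etc))
        return text

    # 10 RRs loaded in 10 seconds at 25% done -> 30 more seconds expected:
    # "10 RRs (25.0%) in 0:00:10, 1 RRs/sec, 0:00:30 ETC"
    print(format_progress(10, 10, 0.25))
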
diff --git a/src/bin/loadzone/loadzone_messages.mes b/src/bin/loadzone/loadzone_messages.mes
index ca241b3..744a1a4 100644
--- a/src/bin/loadzone/loadzone_messages.mes
+++ b/src/bin/loadzone/loadzone_messages.mes
@@ -27,16 +27,11 @@ LOADZONE_ZONE_CREATED), but the loading operation has subsequently
failed. The newly created zone has been removed from the data source,
so that the data source will go back to the original state.
-% LOADZONE_DONE Loaded (at least) %1 RRs into zone %2/%3 in %4 seconds
+% LOADZONE_DONE Loaded %1 RRs into zone %2/%3 in %4 seconds
b10-loadzone has successfully loaded the specified zone. If there was
an old version of the zone in the data source, it is now deleted.
-It also prints (a lower bound of) the number of RRs that have been loaded
-and the time spent for the loading. Due to a limitation of the
-current implementation of the underlying library however, it cannot show the
-exact number of the loaded RRs; it's counted for every N-th RR where N
-is the value of the -i command line option. So, for smaller zones that
-don't even contain N RRs, the reported value will be 0. This will be
-improved in a future version.
+It also prints the number of RRs that have been loaded
+and the time spent for the loading.
% LOADZONE_LOAD_ERROR Failed to load zone %1/%2: %3
Loading a zone by b10-loadzone fails for some reason in the middle of
diff --git a/src/bin/loadzone/tests/correct/example.db b/src/bin/loadzone/tests/correct/example.db
index 38d1329..fe012cf 100644
--- a/src/bin/loadzone/tests/correct/example.db
+++ b/src/bin/loadzone/tests/correct/example.db
@@ -2,17 +2,11 @@
$ORIGIN example.com.
$TTL 60
@ IN SOA ns1.example.com. hostmaster.example.com. (1 43200 900 1814400 7200)
-; these need #2390
-; IN 20 NS ns1
-; NS ns2
- IN 20 NS ns1.example.com.
- NS ns2.example.com.
+ IN 20 NS ns1
+ NS ns2
ns1 IN 30 A 192.168.1.102
-; these need #2390
-; 70 NS ns3
-; IN NS ns4
- 70 NS ns3.example.com.
- IN NS ns4.example.com.
+ 70 NS ns3
+ IN NS ns4
10 IN MX 10 mail.example.com.
ns2 80 A 1.1.1.1
ns3 IN A 2.2.2.2
diff --git a/src/bin/loadzone/tests/correct/include.db b/src/bin/loadzone/tests/correct/include.db
index 53871bb..f60a240 100644
--- a/src/bin/loadzone/tests/correct/include.db
+++ b/src/bin/loadzone/tests/correct/include.db
@@ -7,9 +7,7 @@ $TTL 300
1814400
3600
)
-; this needs #2390
-; NS ns
- NS ns.include.
+ NS ns
ns A 127.0.0.1
diff --git a/src/bin/loadzone/tests/correct/mix1.db b/src/bin/loadzone/tests/correct/mix1.db
index 059fde7..a9d58a8 100644
--- a/src/bin/loadzone/tests/correct/mix1.db
+++ b/src/bin/loadzone/tests/correct/mix1.db
@@ -6,9 +6,7 @@ $ORIGIN mix1.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.mix1.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/mix2.db b/src/bin/loadzone/tests/correct/mix2.db
index e89c2af..2c8153d 100644
--- a/src/bin/loadzone/tests/correct/mix2.db
+++ b/src/bin/loadzone/tests/correct/mix2.db
@@ -6,9 +6,7 @@ $ORIGIN mix2.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.mix2.
+ NS ns
ns A 10.53.0.1
a TXT "inherited ttl 1"
$INCLUDE mix2sub1.txt
diff --git a/src/bin/loadzone/tests/correct/ttl1.db b/src/bin/loadzone/tests/correct/ttl1.db
index 7f04ff8..aa6e2bb 100644
--- a/src/bin/loadzone/tests/correct/ttl1.db
+++ b/src/bin/loadzone/tests/correct/ttl1.db
@@ -6,9 +6,7 @@ $ORIGIN ttl1.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttl1.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/ttl2.db b/src/bin/loadzone/tests/correct/ttl2.db
index b7df040..f7f6eee 100644
--- a/src/bin/loadzone/tests/correct/ttl2.db
+++ b/src/bin/loadzone/tests/correct/ttl2.db
@@ -6,9 +6,7 @@ $ORIGIN ttl2.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttl2.
+ NS ns
ns A 10.53.0.1
a TXT "inherited ttl 1"
b 2 TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/correct/ttlext.db b/src/bin/loadzone/tests/correct/ttlext.db
index 844f452..f8b96ea 100644
--- a/src/bin/loadzone/tests/correct/ttlext.db
+++ b/src/bin/loadzone/tests/correct/ttlext.db
@@ -6,9 +6,7 @@ $ORIGIN ttlext.
1814400
3
)
-; this needs #2390
-; NS ns
- NS ns.ttlext.
+ NS ns
ns A 10.53.0.1
a TXT "soa minttl 3"
b 2S TXT "explicit ttl 2"
diff --git a/src/bin/loadzone/tests/loadzone_test.py b/src/bin/loadzone/tests/loadzone_test.py
index d1ee131..351bc59 100755
--- a/src/bin/loadzone/tests/loadzone_test.py
+++ b/src/bin/loadzone/tests/loadzone_test.py
@@ -79,7 +79,7 @@ class TestLoadZoneRunner(unittest.TestCase):
self.assertEqual(DATASRC_CONFIG, self.__runner._datasrc_config)
self.assertEqual('sqlite3', self.__runner._datasrc_type) # default
self.assertEqual(10000, self.__runner._report_interval) # default
- self.assertEqual(RRClass.IN(), self.__runner._zone_class) # default
+ self.assertEqual(RRClass.IN, self.__runner._zone_class) # default
self.assertEqual('INFO', self.__runner._log_severity) # default
self.assertEqual(0, self.__runner._log_debuglevel)
@@ -135,14 +135,14 @@ class TestLoadZoneRunner(unittest.TestCase):
'memory')
def __common_load_setup(self):
- self.__runner._zone_class = RRClass.IN()
+ self.__runner._zone_class = RRClass.IN
self.__runner._zone_name = TEST_ZONE_NAME
self.__runner._zone_file = NEW_ZONE_TXT_FILE
self.__runner._datasrc_type = 'sqlite3'
self.__runner._datasrc_config = DATASRC_CONFIG
self.__runner._report_interval = 1
self.__reports = []
- self.__runner._report_progress = lambda x: self.__reports.append(x)
+ self.__runner._report_progress = lambda x, _: self.__reports.append(x)
def __check_zone_soa(self, soa_txt, zone_name=TEST_ZONE_NAME):
"""Check that the given SOA RR exists and matches the expected string
@@ -159,7 +159,7 @@ class TestLoadZoneRunner(unittest.TestCase):
self.assertEqual(client.NOTFOUND, result)
return
self.assertEqual(client.SUCCESS, result)
- result, rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, rrset, _ = finder.find(zone_name, RRType.SOA)
if soa_txt:
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(soa_txt, rrset.to_text())
@@ -175,6 +175,7 @@ class TestLoadZoneRunner(unittest.TestCase):
# be 3 RRs
self.assertEqual([1, 2, 3], self.__reports)
self.__check_zone_soa(NEW_SOA_TXT)
+ self.assertEqual(3, self.__runner._loaded_rrs)
def test_load_update_skipped_report(self):
'''successful loading, with reports for every 2 RRs'''
@@ -182,6 +183,8 @@ class TestLoadZoneRunner(unittest.TestCase):
self.__runner._report_interval = 2
self.__runner._do_load()
self.assertEqual([2], self.__reports)
+ # total RRs should still be set to the actual value
+ self.assertEqual(3, self.__runner._loaded_rrs)
def test_load_update_no_report(self):
'''successful loading, without progress reports'''
@@ -190,6 +193,36 @@ class TestLoadZoneRunner(unittest.TestCase):
self.__runner._do_load()
self.assertEqual([], self.__reports) # no report
self.__check_zone_soa(NEW_SOA_TXT) # but load is completed
+ self.assertEqual(3, self.__runner._loaded_rrs)
+
+ def test_report_progress(self):
+ '''Check the output format of report_progress.
+
+ For a simple scenario and some minor corner cases. We tweak the
+ start and current times so the test results will be predictable.
+
+ '''
+ # 10 RRs in 10 sec, which is 25% of the entire zone; it'll take
+ # 30 sec more
+ self.__runner._start_time = 10
+ self.__runner._get_time = lambda: 20
+ self.assertEqual('\r10 RRs (25.0%) in 0:00:10, 1 RRs/sec, ' +
+ '0:00:30 ETC',
+ self.__runner._report_progress(10, 0.25, False))
+
+ # start time == current time. unlikely to happen in practice, but
+ # it shouldn't cause disruption.
+ self.__runner._get_time = lambda: 10
+ self.assertEqual('\r10 RRs (25.0%) in 0:00:00, 0 RRs/sec, ' +
+ '0:00:00 ETC',
+ self.__runner._report_progress(10, 0.25, False))
+
+ # progress is unknown
+ self.__runner._get_time = lambda: 20
+ unknown_progress = isc.datasrc.ZoneLoader.PROGRESS_UNKNOWN
+ self.assertEqual('\r10 RRs in 0:00:10, 1 RRs/sec',
+ self.__runner._report_progress(10, unknown_progress,
+ False))
def test_create_and_load(self):
'''successful case to loading contents to a new zone (created).'''
@@ -275,7 +308,8 @@ class TestLoadZoneRunner(unittest.TestCase):
def test_load_interrupted(self):
'''Load attempt fails due to signal interruption'''
self.__common_load_setup()
- self.__runner._report_progress = lambda x: self.__interrupt_progress(x)
+ self.__runner._report_progress = \
+ lambda x, _: self.__interrupt_progress(x)
# The interrupting _report_progress() will terminate the loading
# in the middle. the number of reports is smaller, and the zone
# won't be changed.
@@ -290,7 +324,8 @@ class TestLoadZoneRunner(unittest.TestCase):
'''
self.__common_load_setup()
- self.__runner._report_progress = lambda x: self.__interrupt_progress(x)
+ self.__runner._report_progress = \
+ lambda x, _: self.__interrupt_progress(x)
self.__runner._zone_name = Name('example.com')
self.__runner._zone_file = ALT_NEW_ZONE_TXT_FILE
self.__check_zone_soa(None, zone_name=Name('example.com'))
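
The test_report_progress() addition above relies on the small hook introduced in loadzone.py.in: elapsed time is computed through self._get_time (time.time by default), so a test can substitute a lambda and pin self._start_time to get deterministic output. A minimal illustration of the same pattern, with hypothetical names (Reporter is not part of the BIND 10 sources):

    import time
    import unittest

    class Reporter:
        def __init__(self):
            self._get_time = time.time   # replaceable hook; tests can fake the clock
            self._start_time = None

        def elapsed(self):
            return self._get_time() - self._start_time

    class ElapsedTest(unittest.TestCase):
        def test_elapsed_with_fake_clock(self):
            r = Reporter()
            r._start_time = 10
            r._get_time = lambda: 20     # freeze "now" so the result is predictable
            self.assertEqual(10, r.elapsed())

    if __name__ == '__main__':
        unittest.main()
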
diff --git a/src/bin/msgq/msgq.py.in b/src/bin/msgq/msgq.py.in
index 68c18dc..ca5d705 100755
--- a/src/bin/msgq/msgq.py.in
+++ b/src/bin/msgq/msgq.py.in
@@ -70,6 +70,23 @@ SPECFILE_LOCATION = SPECFILE_PATH + "/msgq.spec"
class MsgQReceiveError(Exception): pass
+class MsgQCloseOnReceive(Exception):
+ """Exception raised when reading data from a socket results in 'shutdown'.
+
+ This happens when msgq receives 0-length data. The partial_read
+ attribute records whether this happened in the middle of reading a
+ message (i.e. after some data had already been read); it is set to
+ True if and only if that is the case. An upper layer catching the
+ exception can use it to distinguish the severity of the event.
+
+ """
+ def __init__(self, reason, partial_read):
+ self.partial_read = partial_read
+ self.__reason = reason
+
+ def __str__(self):
+ return self.__reason
+
class SubscriptionManager:
def __init__(self, cfgmgr_ready):
"""
@@ -311,23 +328,22 @@ class MsgQ:
lname = self.newlname()
self.lnames[lname] = newsocket
+ logger.debug(TRACE_BASIC, MSGQ_SOCKET_REGISTERED, newsocket.fileno(),
+ lname)
+
if self.poller:
self.poller.register(newsocket, select.POLLIN)
else:
self.add_kqueue_socket(newsocket)
- def process_socket(self, fd):
- """Process a read on a socket."""
- if not fd in self.sockets:
- logger.error(MSGQ_READ_UNKNOWN_FD, fd)
- return
- sock = self.sockets[fd]
- self.process_packet(fd, sock)
-
def kill_socket(self, fd, sock):
"""Fully close down the socket."""
+ # Unregister events on the socket. Note that we don't have to do
+ # this for kqueue because the registered events are automatically
+ # deleted when the corresponding socket is closed.
if self.poller:
self.poller.unregister(sock)
+
self.subs.unsubscribe_all(sock)
lname = [ k for k, v in self.lnames.items() if v == sock ][0]
del self.lnames[lname]
@@ -337,24 +353,35 @@ class MsgQ:
del self.sendbuffs[fd]
logger.debug(TRACE_BASIC, MSGQ_SOCK_CLOSE, fd)
- def getbytes(self, fd, sock, length):
+ def __getbytes(self, fd, sock, length, continued):
"""Get exactly the requested bytes, or raise an exception if
- EOF."""
+ EOF.
+
+ continued is set to True if this method is called to complete
+ already read data.
+ """
received = b''
while len(received) < length:
try:
data = sock.recv(length - len(received))
- except socket.error:
- raise MsgQReceiveError(socket.error)
+
+ except socket.error as err:
+ # This case includes ECONNRESET, which seems to happen when
+ # the remote client has closed its socket at some subtle
+ # timing (it should normally result in receiving empty data).
+ # Since we didn't figure out how exactly that could happen,
+ # we treat it just like other really-unexpected socket errors.
+ raise MsgQReceiveError(str(err))
if len(data) == 0:
- raise MsgQReceiveError("EOF")
+ raise MsgQCloseOnReceive("EOF", continued)
received += data
+ continued = True
return received
def read_packet(self, fd, sock):
"""Read a correctly formatted packet. Will raise exceptions if
something fails."""
- lengths = self.getbytes(fd, sock, 6)
+ lengths = self.__getbytes(fd, sock, 6, False)
overall_length, routing_length = struct.unpack(">IH", lengths)
if overall_length < 2:
raise MsgQReceiveError("overall_length < 2")
@@ -365,9 +392,9 @@ class MsgQ:
raise MsgQReceiveError("routing_length == 0")
data_length = overall_length - routing_length
# probably need to sanity check lengths here...
- routing = self.getbytes(fd, sock, routing_length)
+ routing = self.__getbytes(fd, sock, routing_length, True)
if data_length > 0:
- data = self.getbytes(fd, sock, data_length)
+ data = self.__getbytes(fd, sock, data_length, True)
else:
data = None
return (routing, data)
@@ -376,8 +403,15 @@ class MsgQ:
"""Process one packet."""
try:
routing, data = self.read_packet(fd, sock)
- except MsgQReceiveError as err:
- logger.error(MSGQ_RECV_ERR, fd, err)
+ except (MsgQReceiveError, MsgQCloseOnReceive) as err:
+ # If it's MsgQCloseOnReceive and that happens without reading
+ # any data, it basically means the remote client has closed the
+ # socket, so we log it as debug information. Otherwise, it's
+ # a somewhat unexpected event, so we consider it an "error".
+ if isinstance(err, MsgQCloseOnReceive) and not err.partial_read:
+ logger.debug(TRACE_BASIC, MSGQ_CLOSE_ON_RECV, fd)
+ else:
+ logger.error(MSGQ_RECV_ERROR, fd, err)
self.kill_socket(fd, sock)
return
@@ -385,7 +419,7 @@ class MsgQ:
routingmsg = isc.cc.message.from_wire(routing)
except DecodeError as err:
self.kill_socket(fd, sock)
- logger.error(MSGQ_HDR_DECODE_ERR, fd, err)
+ logger.error(MSGQ_HDR_DECODE_ERROR, fd, err)
return
self.process_command(fd, sock, routingmsg, data)
@@ -428,9 +462,12 @@ class MsgQ:
def sendmsg(self, sock, env, msg = None):
self.send_prepared_msg(sock, self.preparemsg(env, msg))
- def __send_data(self, sock, data):
+ def _send_data(self, sock, data):
"""
- Send a piece of data to the given socket.
+ Send a piece of data to the given socket. This method is
+ essentially "private" to MsgQ, but defined as if it were "protected"
+ for easier access from tests.
+
Parameters:
sock: The socket to send to
data: The list of bytes to send
@@ -446,15 +483,17 @@ class MsgQ:
sock.setblocking(0)
return sock.send(data)
except socket.error as e:
- if e.errno in [ errno.EAGAIN,
- errno.EWOULDBLOCK,
- errno.EINTR ]:
+ if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR ]:
return 0
- elif e.errno in [ errno.EPIPE,
- errno.ECONNRESET,
- errno.ENOBUFS ]:
- logger.error(MSGQ_SEND_ERR, sock.fileno(),
- errno.errorcode[e.errno])
+ elif e.errno in [ errno.EPIPE, errno.ECONNRESET, errno.ENOBUFS ]:
+ # EPIPE happens if the remote module has terminated by the time
+ # of this send; its severity can vary, but in many cases it
+ # shouldn't be critical, so we log it separately as a warning.
+ if e.errno == errno.EPIPE:
+ logger.warn(MSGQ_CLOSE_ON_SEND, sock.fileno())
+ else:
+ logger.error(MSGQ_SEND_ERROR, sock.fileno(),
+ errno.errorcode[e.errno])
self.kill_socket(sock.fileno(), sock)
return None
else:
@@ -469,7 +508,7 @@ class MsgQ:
if fileno in self.sendbuffs:
amount_sent = 0
else:
- amount_sent = self.__send_data(sock, msg)
+ amount_sent = self._send_data(sock, msg)
if amount_sent is None:
# Socket has been killed, drop the send
return
@@ -489,7 +528,7 @@ class MsgQ:
last_sent = now
if self.poller:
self.poller.register(fileno, select.POLLIN |
- select.POLLOUT)
+ select.POLLOUT)
else:
self.add_kqueue_socket(sock, True)
self.sendbuffs[fileno] = (last_sent, buff)
@@ -498,7 +537,7 @@ class MsgQ:
# Try to send some data from the buffer
(_, msg) = self.sendbuffs[fileno]
sock = self.sockets[fileno]
- amount_sent = self.__send_data(sock, msg)
+ amount_sent = self._send_data(sock, msg)
if amount_sent is not None:
# Keep the rest
msg = msg[amount_sent:]
@@ -581,7 +620,7 @@ class MsgQ:
if err.args[0] == errno.EINTR:
events = []
else:
- logger.fatal(MSGQ_POLL_ERR, err)
+ logger.fatal(MSGQ_POLL_ERROR, err)
break
with self.__lock:
for (fd, event) in events:
@@ -592,12 +631,17 @@ class MsgQ:
self.running = False
break
else:
- if event & select.POLLOUT:
- self.__process_write(fd)
- elif event & select.POLLIN:
- self.process_socket(fd)
- else:
+ writable = event & select.POLLOUT
+ # Note: it may be okay to read data if available
+ # immediately after writing some, but due to an unexpected
+ # regression (see comments on the kqueue version below)
+ # we restrict ourselves to one operation per iteration for now.
+ # In the future we may clarify the point and enable the
+ # "read/write" mode.
+ readable = not writable and (event & select.POLLIN)
+ if not writable and not readable:
logger.error(MSGQ_POLL_UNKNOWN_EVENT, fd, event)
+ self._process_fd(fd, writable, readable, False)
def run_kqueue(self):
while self.running:
@@ -616,14 +660,35 @@ class MsgQ:
self.running = False
break;
else:
- if event.filter == select.KQ_FILTER_WRITE:
- self.__process_write(event.ident)
- if event.filter == select.KQ_FILTER_READ and \
- event.data > 0:
- self.process_socket(event.ident)
- elif event.flags & select.KQ_EV_EOF:
- self.kill_socket(event.ident,
- self.sockets[event.ident])
+ fd = event.ident
+ writable = event.filter == select.KQ_FILTER_WRITE
+ readable = (event.filter == select.KQ_FILTER_READ and
+ event.data > 0)
+ # It seems to break some of our test cases if we
+ # immediately close the socket on EOF after reading
+ # some data. It may be possible to avoid this by tweaking
+ # the tests, but until we can be sure we'll hold off.
+ closed = (not readable and
+ (event.flags & select.KQ_EV_EOF))
+ self._process_fd(fd, writable, readable, closed)
+
+ def _process_fd(self, fd, writable, readable, closed):
+ '''Process a single FD: unified subroutine of run_kqueue/poller.
+
+ closed can be True only in the case of kqueue. This is essentially
+ private but is defined as if it were "protected" so it's callable
+ from tests.
+
+ '''
+ # We need to check if FD is still in the sockets dict, because
+ # it's possible that the socket has been "killed" while processing
+ # other FDs; it's even possible it's killed within this method.
+ if writable and fd in self.sockets:
+ self.__process_write(fd)
+ if readable and fd in self.sockets:
+ self.process_packet(fd, self.sockets[fd])
+ if closed and fd in self.sockets:
+ self.kill_socket(fd, self.sockets[fd])
def stop(self):
# Signal it should terminate.
@@ -760,3 +825,5 @@ if __name__ == "__main__":
pass
msgq.shutdown()
+
+ logger.info(MSGQ_EXITING)
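
For readers not familiar with the msgq wire handling, the __getbytes()/MsgQCloseOnReceive change above boils down to a common pattern: loop on recv() until the requested length arrives, and treat an empty read as the peer closing, remembering whether any bytes had already been consumed so the caller can judge severity. A rough standalone sketch of that pattern (names are illustrative, not the msgq API):

    class CloseOnReceive(Exception):
        """Peer closed the connection; partial_read says whether some bytes arrived first."""
        def __init__(self, partial_read):
            super().__init__('connection closed by peer')
            self.partial_read = partial_read

    def read_exact(sock, length):
        received = b''
        while len(received) < length:
            data = sock.recv(length - len(received))
            if len(data) == 0:
                # empty read == peer closed; mid-message closes are more suspicious
                raise CloseOnReceive(partial_read=(len(received) > 0))
            received += data
        return received
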
diff --git a/src/bin/msgq/msgq_messages.mes b/src/bin/msgq/msgq_messages.mes
index 75e4227..09c9030 100644
--- a/src/bin/msgq/msgq_messages.mes
+++ b/src/bin/msgq/msgq_messages.mes
@@ -23,6 +23,31 @@
This is a debug message. The message queue has little bit of special handling
for the configuration manager. This special handling is happening now.
+% MSGQ_CLOSE_ON_RECV Reading from socket canceled as it's closed: FD=%1
+A debug message. The msgq daemon was notified of a read event on a
+socket, but its initial read operation failed because the remote
+client had closed its socket. This can happen in normal operation
+when a module shuts down.
+
+% MSGQ_CLOSE_ON_SEND Sending to socket failed as already closed (okay to ignore on shutdown): FD=%1
+The msgq daemon tries to send some data to a client module, but it
+failed because the socket has been closed. This normally means the
+client terminates (for some reason - either gracefully or as a crash)
+while other modules try to send a message to the terminated module.
+Since msgq doesn't keep track of the status of client modules, this
+can happen and is not really an error for msgq; however, it can still
+be an unexpected event for the BIND 10 system as a whole in that this
+particular message is lost, so it's logged as a warning. If this
+message is logged for a running BIND 10 system, it's suggested to
+check other log messages; there may be an error from other modules
+reporting a missing response message. One common, less critical case
+where this message is logged is during shutdown. The ordering of
+process shutdown is basically arbitrary at this moment, so it's
+possible that some module tries to send a "quitting" message to some
+other module but the latter has already shut down. Such cases are
+generally non critical, but you may want to check other possible error
+messages.
+
% MSGQ_COMMAND Running command %1 with arguments %2
Debug message. The message queue received a command and it is running it.
@@ -34,13 +59,21 @@ the message queue version and version of the module.
% MSGQ_CONFIG_DATA Received configuration update for the msgq: %1
Debug message. The message queue received a configuration update, handling it.
-% MSGQ_HDR_DECODE_ERR Error decoding header received from socket %1: %2
+% MSGQ_EXITING exiting
+The msgq daemon is exiting.
+
+% MSGQ_HDR_DECODE_ERROR Error decoding header received from socket %1: %2
The socket with mentioned file descriptor sent a packet. However, it was not
possible to decode the routing header of the packet. The packet is ignored.
This may be caused by a programmer error (one of the components sending invalid
data) or possibly by incompatible version of msgq and the component (but that's
unlikely, as the protocol is not changed often).
+% MSGQ_INVALID_CMD Received invalid command: %1
+An unknown command listed in the log has been received. It is ignored. This
+indicates either a programmer error (eg. a typo in the command name) or
+incompatible version of a module and message queue daemon.
+
% MSGQ_LISTENER_FAILED Failed to initialize listener on socket file '%1': %2
The message queue daemon tried to listen on a file socket (the path is in the
message), but it failed. The error from the operating system is logged.
@@ -52,7 +85,7 @@ Debug message. The listener is trying to open a listening socket.
Debug message. The message queue successfully opened a listening socket and
waits for incoming connections.
-% MSGQ_POLL_ERR Error while polling for events: %1
+% MSGQ_POLL_ERROR Error while polling for events: %1
A low-level error happened when waiting for events, the error is logged. The
reason for this varies, but it usually means the system is short on some
resources.
@@ -63,30 +96,41 @@ happen and it is either a programmer error or OS bug. The event is ignored. The
number noted as the event is the raw encoded value, which might be useful to
the authors when figuring the problem out.
-% MSGQ_READ_UNKNOWN_FD Got read on strange socket %1
-The OS reported a file descriptor is ready to read. But the daemon doesn't know
-the mentioned file descriptor, which is either a programmer error or OS bug.
-The read event is ignored.
-
-% MSGQ_RECV_ERR Error reading from socket %1: %2
+% MSGQ_RECV_ERROR Error reading from socket %1: %2
There was a low-level error when reading from a socket. The error is logged and
-the corresponding socket is dropped.
+the corresponding socket is dropped. The errors include receiving
+broken or (non-empty but) incomplete data. In either case it usually suggests
+something unexpected is happening within the BIND 10 system; it's probably
+better to restart the system, and if it continues it should be
+reported as a bug. One known, probably non-critical case is
+the "connection reset by peer" (or its variants) socket error appearing
+on shutdown. It's known this happens when the remote client closes the
+connection as part of the shutdown process. Such cases are normally expected
+to be reported as receiving empty data (which we log at the debug level
+as the MSGQ_CLOSE_ON_RECV message), but for some (yet) unknown reason
+it can also be reported as a system error. At shutdown time it's expected
+that connections are closed, so it's probably safe to ignore these messages
+in such a case. We still log them as an error, as we've not figured out
+how exactly that can happen. In the future, we may make the shutdown process
+more robust so the msgq daemon can know when a client shuts down
+more reliably. If and when that is implemented, this error message won't appear
+on shutdown unless there's really something unexpected.
% MSGQ_RECV_HDR Received header: %1
Debug message. This message includes the whole routing header of a packet.
-% MSGQ_INVALID_CMD Received invalid command: %1
-An unknown command listed in the log has been received. It is ignored. This
-indicates either a programmer error (eg. a typo in the command name) or
-incompatible version of a module and message queue daemon.
-
-% MSGQ_SEND_ERR Error while sending to socket %1: %2
+% MSGQ_SEND_ERROR Error while sending to socket %1: %2
There was a low-level error when sending data to a socket. The error is logged
and the corresponding socket is dropped.
% MSGQ_SHUTDOWN Stopping Msgq
Debug message. The message queue is shutting down.
+% MSGQ_SOCKET_REGISTERED Registered a socket descriptor %1 with lname %2
+Debug message. The msgq daemon accepted a session request on the
+shown descriptor of socket and assigned a unique identifier (lname)
+for the client on that socket.
+
% MSGQ_SOCK_CLOSE Closing socket fd %1
Debug message. Closing the mentioned socket.
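
The MSGQ_CLOSE_ON_SEND/MSGQ_SEND_ERROR split documented above corresponds to a simple errno triage on a non-blocking send: transient errors mean "retry later", EPIPE means the peer is already gone (logged as a warning), and anything else is treated as an error, after which the socket is dropped. A hedged sketch of that triage, using the standard logging module rather than the BIND 10 logger (send_some is an illustrative name, not the msgq API):

    import errno
    import logging
    import socket

    log = logging.getLogger('demo')

    def send_some(sock, data):
        """Return bytes sent, 0 to retry later, or None if the socket should be dropped."""
        try:
            sock.setblocking(0)
            return sock.send(data)
        except socket.error as e:
            if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR):
                return 0                          # transient; try again on the next POLLOUT
            if e.errno == errno.EPIPE:
                log.warning('peer already closed fd %d', sock.fileno())
            else:
                log.error('send failed on fd %d: %s', sock.fileno(),
                          errno.errorcode.get(e.errno, e.errno))
            return None                           # caller drops the socket
        finally:
            sock.setblocking(1)
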
diff --git a/src/bin/msgq/tests/msgq_test.py b/src/bin/msgq/tests/msgq_test.py
index 00e15d8..1f04e84 100644
--- a/src/bin/msgq/tests/msgq_test.py
+++ b/src/bin/msgq/tests/msgq_test.py
@@ -1,3 +1,4 @@
+import msgq
from msgq import SubscriptionManager, MsgQ
import unittest
@@ -454,9 +455,8 @@ class SendNonblock(unittest.TestCase):
Two tests are done: one where the error is raised on the 3rd octet,
and one on the 23rd.
"""
- sockerr = socket.error
for err in [ errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR ]:
- sockerr.errno = err
+ sockerr = socket.error(err, 'Socket error')
self.do_send_with_send_error(3, sockerr)
self.do_send_with_send_error(23, sockerr)
@@ -467,9 +467,8 @@ class SendNonblock(unittest.TestCase):
Two tests are done: one where the error is raised on the 3rd octet,
and one on the 23rd.
"""
- sockerr = socket.error
for err in [ errno.EPIPE, errno.ENOBUFS, errno.ECONNRESET ]:
- sockerr.errno = err
+ sockerr = socket.error(err, 'Socket error')
self.do_send_with_send_error(3, sockerr, False)
self.do_send_with_send_error(23, sockerr, False)
@@ -561,6 +560,178 @@ class ThreadTests(unittest.TestCase):
test_thread.join(60)
self.assertTrue(self.__result)
+class SocketTests(unittest.TestCase):
+ '''Test cases for micro behaviors related to socket operations.
+
+ Some cases are covered as part of other tests, but in this fixture
+ we check more details of specific method related to socket operation,
+ with the help of mock classes to avoid expensive overhead.
+
+ '''
+ class MockSocket():
+ '''A mock socket used instead of standard socket objects.'''
+ def __init__(self):
+ self.ex_on_send = None # raised from send() if not None
+ self.recv_result = b'test' # dummy data or exception
+ self.blockings = [] # history of setblocking() params
+ def setblocking(self, on):
+ self.blockings.append(on)
+ def send(self, data):
+ if self.ex_on_send is not None:
+ raise self.ex_on_send
+ return 10 # arbitrary choice
+ def recv(self, len):
+ if isinstance(self.recv_result, Exception):
+ raise self.recv_result
+ ret = self.recv_result
+ self.recv_result = b'' # if called again, return empty data
+ return ret
+ def fileno(self):
+ return 42 # arbitrary choice
+
+ class LoggerWrapper():
+ '''A simple wrapper of logger to inspect log messages.'''
+ def __init__(self, logger):
+ self.error_called = 0
+ self.warn_called = 0
+ self.debug_called = 0
+ self.orig_logger = logger
+ def error(self, *args):
+ self.error_called += 1
+ self.orig_logger.error(*args)
+ def warn(self, *args):
+ self.warn_called += 1
+ self.orig_logger.warn(*args)
+ def debug(self, *args):
+ self.debug_called += 1
+ self.orig_logger.debug(*args)
+
+ def mock_kill_socket(self, fileno, sock):
+ '''A replacement of MsgQ.kill_socket method for inspection.'''
+ self.__killed_socket = (fileno, sock)
+ if fileno in self.__msgq.sockets:
+ del self.__msgq.sockets[fileno]
+
+ def setUp(self):
+ self.__msgq = MsgQ()
+ self.__msgq.kill_socket = self.mock_kill_socket
+ self.__sock = self.MockSocket()
+ self.__data = b'dummy'
+ self.__msgq.sockets[42] = self.__sock
+ self.__msgq.sendbuffs[42] = (None, b'testdata')
+ self.__sock_error = socket.error()
+ self.__killed_socket = None
+ self.__logger = self.LoggerWrapper(msgq.logger)
+ msgq.logger = self.__logger
+
+ def tearDown(self):
+ msgq.logger = self.__logger.orig_logger
+
+ def test_send_data(self):
+ # Successful case: _send_data() returns the hardcoded value, and
+ # setblocking() is called twice with the expected parameters
+ self.assertEqual(10, self.__msgq._send_data(self.__sock, self.__data))
+ self.assertEqual([0, 1], self.__sock.blockings)
+ self.assertIsNone(self.__killed_socket)
+
+ def test_send_data_interrupt(self):
+ '''send() is interrupted. _send_data() returns 0, sock isn't killed.'''
+ expected_blockings = []
+ for eno in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
+ self.__sock_error.errno = eno
+ self.__sock.ex_on_send = self.__sock_error
+ self.assertEqual(0, self.__msgq._send_data(self.__sock,
+ self.__data))
+ expected_blockings.extend([0, 1])
+ self.assertEqual(expected_blockings, self.__sock.blockings)
+ self.assertIsNone(self.__killed_socket)
+
+ def test_send_data_error(self):
+ '''Unexpected error happens on send(). The socket is killed.
+
+ If the error is EPIPE, it's logged at the warn level; otherwise
+ an error message is logged.
+
+ '''
+ expected_blockings = []
+ expected_errors = 0
+ expected_warns = 0
+ for eno in [errno.EPIPE, errno.ECONNRESET, errno.ENOBUFS]:
+ self.__sock_error.errno = eno
+ self.__sock.ex_on_send = self.__sock_error
+ self.__killed_socket = None # clear any previous value
+ self.assertEqual(None, self.__msgq._send_data(self.__sock,
+ self.__data))
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ expected_blockings.extend([0, 1])
+ self.assertEqual(expected_blockings, self.__sock.blockings)
+
+ if eno == errno.EPIPE:
+ expected_warns += 1
+ else:
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_warns, self.__logger.warn_called)
+
+ def test_process_fd_read_after_bad_write(self):
+ '''Check the specific case of a write failure followed by a read attempt.
+
+ The write failure results in kill_socket, so the read shouldn't be tried.
+
+ '''
+ self.__sock_error.errno = errno.EPIPE
+ self.__sock.ex_on_send = self.__sock_error
+ self.__msgq.process_socket = None # if called, trigger an exception
+ self.__msgq._process_fd(42, True, True, False) # shouldn't crash
+
+ # check the socket is deleted from the fileno=>sock dictionary
+ self.assertEqual({}, self.__msgq.sockets)
+
+ def test_process_fd_close_after_bad_write(self):
+ '''Similar to the previous, but for checking dup'ed kill attempt'''
+ self.__sock_error.errno = errno.EPIPE
+ self.__sock.ex_on_send = self.__sock_error
+ self.__msgq._process_fd(42, True, False, True) # shouldn't crash
+ self.assertEqual({}, self.__msgq.sockets)
+
+ def test_process_fd_writer_after_close(self):
+ '''Emulate a "writable" socket that has already been closed and killed.'''
+ # This just shouldn't crash
+ self.__msgq._process_fd(4200, True, False, False)
+
+ def test_process_packet(self):
+ '''Check some failure cases in handling an incoming message.'''
+ expected_errors = 0
+ expected_debugs = 0
+
+ # if socket.recv() fails due to socket.error, it will be logged
+ # as error and the socket will be killed regardless of errno.
+ for eno in [errno.ENOBUFS, errno.ECONNRESET]:
+ self.__sock_error.errno = eno
+ self.__sock.recv_result = self.__sock_error
+ self.__killed_socket = None # clear any previous value
+ self.__msgq.process_packet(42, self.__sock)
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_debugs, self.__logger.debug_called)
+
+ # if socket.recv() returns empty data, the result depends on whether
+ # there's any preceding data; in the second case below, at least
+ # 6 bytes of data will be expected, and the second call to our faked
+ # recv() returns empty data. In that case it will be logged as error.
+ for recv_data in [b'', b'short']:
+ self.__sock.recv_result = recv_data
+ self.__killed_socket = None
+ self.__msgq.process_packet(42, self.__sock)
+ self.assertEqual((42, self.__sock), self.__killed_socket)
+ if len(recv_data) == 0:
+ expected_debugs += 1
+ else:
+ expected_errors += 1
+ self.assertEqual(expected_errors, self.__logger.error_called)
+ self.assertEqual(expected_debugs, self.__logger.debug_called)
+
if __name__ == '__main__':
isc.log.resetUnitTestRootLogger()
unittest.main()
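
The SocketTests fixture above swaps the module-level msgq.logger for a counting wrapper in setUp() and restores it in tearDown(), which lets the tests assert how many error/warn/debug calls a code path produced. The same idea in miniature, against Python's standard logging (CountingLogger is a made-up name for illustration):

    import logging

    class CountingLogger:
        """Delegate to a real logger while counting error() calls."""
        def __init__(self, wrapped):
            self.wrapped = wrapped
            self.error_called = 0

        def error(self, *args):
            self.error_called += 1
            self.wrapped.error(*args)

    real = logging.getLogger('demo')
    counting = CountingLogger(real)
    counting.error('boom: %s', 'details')
    assert counting.error_called == 1
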
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index 485d022..ae73c3d 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -52,8 +52,8 @@
<title>DESCRIPTION</title>
<para>The <command>b10-resolver</command> daemon provides the BIND 10
recursive DNS server. Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -205,7 +205,7 @@ once that is merged you can for instance do 'config add Resolver/forward_address
<command>shutdown</command> exits <command>b10-resolver</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index 9536608..a3de340 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -431,13 +431,14 @@ Resolver::processMessage(const IOMessage& io_message,
// Ignore all responses.
if (query_message->getHeaderFlag(Message::HEADERFLAG_QR)) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_UNEXPECTED_RESPONSE);
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_UNEXPECTED_RESPONSE);
server->resume(false);
return;
}
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_HEADER_ERROR)
- .arg(ex.what());
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_HEADER_PROCESSING_FAILED).arg(ex.what());
server->resume(false);
return;
}
@@ -446,14 +447,16 @@ Resolver::processMessage(const IOMessage& io_message,
try {
query_message->fromWire(request_buffer);
} catch (const DNSProtocolError& error) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_PROTOCOL_ERROR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_PROTOCOL_BODY_PARSE_FAILED)
.arg(error.what()).arg(error.getRcode());
makeErrorMessage(query_message, answer_message,
buffer, error.getRcode());
server->resume(true);
return;
} catch (const Exception& ex) {
- LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO, RESOLVER_MESSAGE_ERROR)
+ LOG_DEBUG(resolver_logger, RESOLVER_DBG_IO,
+ RESOLVER_MESSAGE_PROCESSING_FAILED)
.arg(ex.what()).arg(Rcode::SERVFAIL());
makeErrorMessage(query_message, answer_message,
buffer, Rcode::SERVFAIL());
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
index 214519b..c722af1 100644
--- a/src/bin/resolver/resolver_messages.mes
+++ b/src/bin/resolver/resolver_messages.mes
@@ -81,7 +81,7 @@ has passed a set of checks (message is well-formed, it is allowed by the
ACL, it is a supported opcode, etc.) and is being forwarded to upstream
servers.
-% RESOLVER_HEADER_ERROR message received, exception when processing header: %1
+% RESOLVER_HEADER_PROCESSING_FAILED message received, exception when processing header: %1
This is a debug message from the resolver noting that an exception
occurred during the processing of a received packet. The packet has
been dropped.
@@ -97,7 +97,7 @@ During the update of the resolver's configuration parameters, the value
of the lookup timeout was found to be too small. The configuration
update will not be applied.
-% RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2
+% RESOLVER_MESSAGE_PROCESSING_FAILED error parsing received message: %1 - returning %2
This is a debug message noting that parsing of the body of a received
message by the resolver failed due to some error (although the parsing of
the header succeeded). The message parameters give a textual description
@@ -135,18 +135,11 @@ A warning message issued during resolver startup, this indicates that
no root addresses have been set. This may be because the resolver will
get them from a priming query.
-% RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2
-This is a debug message noting that the resolver received a message and
-the parsing of the body of the message failed due to some non-protocol
-related reason (although the parsing of the header succeeded).
-The message parameters give a textual description of the problem and
-the RCODE returned.
-
% RESOLVER_PRINT_COMMAND print message command, arguments are: %1
This debug message is logged when a "print_message" command is received
by the resolver over the command channel.
-% RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2
+% RESOLVER_PROTOCOL_BODY_PARSE_FAILED protocol error parsing received message: %1 - returning %2
This is a debug message noting that the resolver received a message and
the parsing of the body of the message failed due to some protocol error
(although the parsing of the header succeeded). The message parameters
diff --git a/src/bin/sockcreator/tests/sockcreator_tests.cc b/src/bin/sockcreator/tests/sockcreator_tests.cc
index 9604567..b834e1c 100644
--- a/src/bin/sockcreator/tests/sockcreator_tests.cc
+++ b/src/bin/sockcreator/tests/sockcreator_tests.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <config.h>
+
#include "../sockcreator.h"
#include <util/unittests/fork.h>
@@ -195,7 +197,12 @@ TEST(get_sock, tcp4_create) {
testAnyCreate<sockaddr_in>(SOCK_STREAM, tcpCheck);
}
-TEST(get_sock, udp6_create) {
+#ifdef HAVE_BROKEN_GET_IPV6_USE_MIN_MTU
+TEST(get_sock, DISABLED_udp6_create)
+#else
+TEST(get_sock, udp6_create)
+#endif
+{
testAnyCreate<sockaddr_in6>(SOCK_DGRAM, udpCheck<sockaddr_in6>);
}
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 28d6ac9..be91737 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -54,7 +54,7 @@
intended for HTTP/XML interface for statistics module. This server
process runs as a process separated from the process of the BIND 10 Stats
daemon (<command>b10-stats</command>). The server is initially executed
- by the BIND 10 boss process (<command>bind10</command>) and eventually
+ by the b10-init process and eventually
exited by it. The server is intended to serve requests by HTTP
clients like web browsers and third-party modules. When the server is
asked, it requests BIND 10 statistics data or its schema from
@@ -74,7 +74,7 @@
10 statistics. The server uses CC session in communication
with <command>b10-stats</command>. CC session is provided
by <command>b10-msgq</command> which is started
- by <command>bind10</command> in advance. The server is implemented by
+ by <command>b10-init</command> in advance. The server is implemented by
HTTP-server libraries included in Python 3. The server obtains the
configuration from the config manager (<command>b10-cfgmgr</command>) in
runtime. Please see below for more details about this spec file and
@@ -176,7 +176,7 @@
exits the <command>b10-stats-httpd</command> process.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
</listitem>
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index ee89ad2..bbdb96e 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -56,18 +56,18 @@
from each BIND 10 module. Its statistics information may be
reported via <command>bindctl</command> or
<command>b10-stats-httpd</command>. It is started by
- <command>bind10</command> and communicates by using the
+ <command>b10-init</command> and communicates by using the
Command Channel by <command>b10-msgq</command> with other
- modules like <command>bind10</command>, <command>b10-auth</command>
+ modules like <command>b10-init</command>, <command>b10-auth</command>
and so on. <command>b10-stats</command> periodically requests statistics
data from each module. The interval time can be configured
via <command>bindctl</command>. <command>b10-stats</command> cannot
accept any command from other modules for updating statistics data. The
stats module collects data and
aggregates it. <command>b10-stats</command> invokes an internal
- command for <command>bind10</command> after its initial
+ command for <command>b10-init</command> after its initial
starting to make sure it collects statistics data from
- <command>bind10</command>.
+ <command>b10-init</command>.
</para>
</refsect1>
@@ -131,7 +131,7 @@
<command>b10-stats</command> process.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index 7123c53..0af0933 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -26,7 +26,8 @@ from optparse import OptionParser, OptionValueError
import errno
import select
-import isc
+import isc.cc
+import isc.config
import isc.util.process
import isc.log
from isc.log_messages.stats_messages import *
@@ -249,17 +250,17 @@ class Stats:
# It counts the number of instances of same module by
# examining the third value from the array result of
- # 'show_processes' of Boss
+ # 'show_processes' of Init
seq = self.cc_session.group_sendmsg(
isc.config.ccsession.create_command("show_processes"),
- 'Boss')
+ 'Init')
(answer, env) = self.cc_session.group_recvmsg(False, seq)
modules = []
if answer:
(rcode, value) = isc.config.ccsession.parse_answer(answer)
if rcode == 0 and type(value) is list:
# NOTE: For example, the "show_processes" command
- # of Boss is assumed to return the response in this
+ # of Init is assumed to return the response in this
# format:
# [
# ...
@@ -696,3 +697,5 @@ if __name__ == "__main__":
sys.exit(1)
except KeyboardInterrupt as kie:
logger.info(STATS_STOPPED_BY_KEYBOARD)
+
+ logger.info(STATS_EXITING)
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 6fbf7bb..dab10a2 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -92,7 +92,7 @@
"item_optional": false,
"item_default": "1970-01-01T00:00:00Z",
"item_title": "Last update time",
- "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+ "item_description": "The latest date time when the stats module receives from other modules like auth server or b10-init process and so on",
"item_format": "date-time"
},
{
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100644
new mode 100755
index 057b8ca..367f56e
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -43,8 +43,8 @@ isc.log.init("b10-stats-httpd", buffer=True)
logger = isc.log.Logger("stats-httpd")
# Some constants for debug levels.
-DBG_STATHTTPD_INIT = logger.DBGLVL_START_SHUT
-DBG_STATHTTPD_MESSAGING = logger.DBGLVL_COMMAND
+DBG_STATSHTTPD_INIT = logger.DBGLVL_START_SHUT
+DBG_STATSHTTPD_MESSAGING = logger.DBGLVL_COMMAND
# If B10_FROM_SOURCE is set in the environment, we use data files
# from a directory relative to that, otherwise we use the ones
@@ -130,9 +130,9 @@ def item_name_list(element, identifier):
return ret
class HttpHandler(http.server.BaseHTTPRequestHandler):
- """HTTP handler class for HttpServer class. The class inhrits the super
- class http.server.BaseHTTPRequestHandler. It implemets do_GET()
- and do_HEAD() and orverrides log_message()"""
+ """HTTP handler class for HttpServer class. The class inherits the super
+ class http.server.BaseHTTPRequestHandler. It implements do_GET()
+ and do_HEAD() and overrides log_message()"""
def do_GET(self):
body = self.send_head()
if body is not None:
@@ -187,11 +187,11 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
# Couldn't find neither specified module name nor
# specified item name
self.send_error(404)
- logger.error(STATHTTPD_SERVER_DATAERROR, err)
+ logger.error(STATSHTTPD_SERVER_DATAERROR, err)
return None
except Exception as err:
self.send_error(500)
- logger.error(STATHTTPD_SERVER_ERROR, err)
+ logger.error(STATSHTTPD_SERVER_ERROR, err)
return None
else:
self.send_response(200)
@@ -201,6 +201,14 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
self.end_headers()
return body
+ def log_message(self, format, *args):
+ """overrides the parent method log_message()
+ to use the bind10 logging framework.
+ """
+ logger.debug(DBG_STATSHTTPD_MESSAGING, STATSHTTPD_HTTPLOG,
+ self.address_string(),
+ format%args)
+
class HttpServerError(Exception):
"""Exception class for HttpServer class. It is intended to be
passed from the HttpServer object to the StatsHttpd object."""
@@ -263,7 +271,7 @@ class StatsHttpd:
def open_mccs(self):
"""Opens a ModuleCCSession object"""
# create ModuleCCSession
- logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_STARTING_CC_SESSION)
+ logger.debug(DBG_STATSHTTPD_INIT, STATSHTTPD_STARTING_CC_SESSION)
self.mccs = isc.config.ModuleCCSession(
SPECFILE_LOCATION, self.config_handler, self.command_handler)
self.cc_session = self.mccs._session
@@ -274,7 +282,7 @@ class StatsHttpd:
return
self.mccs.send_stopping()
- logger.debug(DBG_STATHTTPD_INIT, STATHTTPD_CLOSING_CC_SESSION)
+ logger.debug(DBG_STATSHTTPD_INIT, STATSHTTPD_CLOSING_CC_SESSION)
self.mccs.close()
self.mccs = None
@@ -317,7 +325,7 @@ class StatsHttpd:
server_address, HttpHandler,
self.xml_handler, self.xsd_handler, self.xsl_handler,
self.write_log)
- logger.info(STATHTTPD_STARTED, server_address[0],
+ logger.info(STATSHTTPD_STARTED, server_address[0],
server_address[1])
return httpd
except (socket.gaierror, socket.error,
@@ -333,7 +341,7 @@ class StatsHttpd:
"""Closes sockets for HTTP"""
while len(self.httpd)>0:
ht = self.httpd.pop()
- logger.info(STATHTTPD_CLOSING, ht.server_address[0],
+ logger.info(STATSHTTPD_CLOSING, ht.server_address[0],
ht.server_address[1])
ht.server_close()
@@ -369,7 +377,7 @@ class StatsHttpd:
def stop(self):
"""Stops the running StatsHttpd objects. Closes CC session and
HTTP handling sockets"""
- logger.info(STATHTTPD_SHUTDOWN)
+ logger.info(STATSHTTPD_SHUTDOWN)
self.close_httpd()
self.close_mccs()
self.running = False
@@ -387,7 +395,7 @@ class StatsHttpd:
def config_handler(self, new_config):
"""Config handler for the ModuleCCSession object. It resets
addresses and ports to listen HTTP requests on."""
- logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
+ logger.debug(DBG_STATSHTTPD_MESSAGING, STATSHTTPD_HANDLE_CONFIG,
new_config)
errors = []
if not self.mccs.get_module_spec().\
@@ -405,7 +413,7 @@ class StatsHttpd:
try:
self.open_httpd()
except HttpServerError as err:
- logger.error(STATHTTPD_SERVER_ERROR, err)
+ logger.error(STATSHTTPD_SERVER_INIT_ERROR, err)
# restore old config
self.load_config(old_config)
self.open_httpd()
@@ -417,18 +425,18 @@ class StatsHttpd:
"""Command handler for the ModuleCCSesson object. It handles
"status" and "shutdown" commands."""
if command == "status":
- logger.debug(DBG_STATHTTPD_MESSAGING,
- STATHTTPD_RECEIVED_STATUS_COMMAND)
+ logger.debug(DBG_STATSHTTPD_MESSAGING,
+ STATSHTTPD_RECEIVED_STATUS_COMMAND)
return isc.config.ccsession.create_answer(
0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")")
elif command == "shutdown":
- logger.debug(DBG_STATHTTPD_MESSAGING,
- STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
+ logger.debug(DBG_STATSHTTPD_MESSAGING,
+ STATSHTTPD_RECEIVED_SHUTDOWN_COMMAND)
self.running = False
return isc.config.ccsession.create_answer(0)
else:
- logger.debug(DBG_STATHTTPD_MESSAGING,
- STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
+ logger.debug(DBG_STATSHTTPD_MESSAGING,
+ STATSHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
return isc.config.ccsession.create_answer(
1, "Unknown command: " + str(command))
@@ -613,13 +621,15 @@ if __name__ == "__main__":
stats_httpd = StatsHttpd()
stats_httpd.start()
except OptionValueError as ove:
- logger.fatal(STATHTTPD_BAD_OPTION_VALUE, ove)
+ logger.fatal(STATSHTTPD_BAD_OPTION_VALUE, ove)
sys.exit(1)
except isc.cc.session.SessionError as se:
- logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
+ logger.fatal(STATSHTTPD_CC_SESSION_ERROR, se)
sys.exit(1)
except HttpServerError as hse:
- logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
+ logger.fatal(STATSHTTPD_START_SERVER_INIT_ERROR, hse)
sys.exit(1)
except KeyboardInterrupt as kie:
- logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
+ logger.info(STATSHTTPD_STOPPED_BY_KEYBOARD)
+
+ logger.info(STATSHTTPD_EXITING)
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index ad2e97f..93491b6 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -15,81 +15,90 @@
# No namespace declaration - these constants go in the global namespace
# of the stats_httpd_messages python module.
-% STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1
+% STATSHTTPD_BAD_OPTION_VALUE bad command line argument: %1
The stats-httpd module was called with a bad command-line argument
and will not start.
-% STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1
+% STATSHTTPD_CC_SESSION_ERROR error connecting to message bus: %1
The stats-httpd module was unable to connect to the BIND 10 command
and control bus. A likely problem is that the message bus daemon
(b10-msgq) is not running. The stats-httpd module will now shut down.
-% STATHTTPD_CLOSING closing %1#%2
+% STATSHTTPD_CLOSING closing %1#%2
The stats-httpd daemon will stop listening for requests on the given
address and port number.
-% STATHTTPD_CLOSING_CC_SESSION stopping cc session
+% STATSHTTPD_CLOSING_CC_SESSION stopping cc session
Debug message indicating that the stats-httpd module is disconnecting
from the command and control bus.
-% STATHTTPD_HANDLE_CONFIG reading configuration: %1
+% STATSHTTPD_EXITING exiting
+The stats HTTP server is exiting.
+
+% STATSHTTPD_HANDLE_CONFIG reading configuration: %1
The stats-httpd daemon has received new configuration data and will now
process it. The (changed) data is printed.
-% STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
+% STATSHTTPD_HTTPLOG %1 %2
+Debug HTTP log message. These are the messages logged by the http server
+instance. For most logs, the message shows HTTP client and query
+information like HTTP method, URI, and status code, but the http server
+can also log other information, such as extended status reports.
+
+% STATSHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received
A shutdown command was sent to the stats-httpd module, and it will
now shut down.
-% STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status
+% STATSHTTPD_RECEIVED_STATUS_COMMAND received command to return status
A status command was sent to the stats-httpd module, and it will
respond with 'Stats Httpd is up.' and its PID.
-% STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
+% STATSHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1
An unknown command has been sent to the stats-httpd module. The
stats-httpd module will respond with an error, and the command will
be ignored.
-% STATHTTPD_SERVER_DATAERROR HTTP server data error: %1
+% STATSHTTPD_SERVER_DATAERROR HTTP server data error: %1
An internal error occurred while handling an HTTP request. An HTTP 404
response will be sent back, and the specific error is printed. This
is an error condition that likely indicates that the specified data
corresponding to the requested URI is incorrect.
-% STATHTTPD_SERVER_ERROR HTTP server error: %1
+% STATSHTTPD_SERVER_ERROR HTTP server error: %1
An internal error occurred while handling an HTTP request. An HTTP 500
response will be sent back, and the specific error is printed. This
is an error condition that likely points to a module that is not
-responding correctly to statistic requests.
+responding correctly to statistics requests.
-% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+% STATSHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
There was a problem initializing the HTTP server in the stats-httpd
module upon receiving its configuration data. The most likely cause
is a port binding problem or a bad configuration value. The specific
error is printed in the message. The new configuration is ignored,
and an error is sent back.
-% STATHTTPD_SHUTDOWN shutting down
+% STATSHTTPD_SHUTDOWN shutting down
The stats-httpd daemon is shutting down.
-% STATHTTPD_STARTED listening on %1#%2
+% STATSHTTPD_STARTED listening on %1#%2
The stats-httpd daemon will now start listening for requests on the
given address and port number.
-% STATHTTPD_STARTING_CC_SESSION starting cc session
+% STATSHTTPD_STARTING_CC_SESSION starting cc session
Debug message indicating that the stats-httpd module is connecting to
the command and control bus.
-% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+% STATSHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
There was a problem initializing the HTTP server in the stats-httpd
module upon startup. The most likely cause is that it was not able
to bind to the listening port. The specific error is printed, and the
module will shut down.
-% STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
+% STATSHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the stats-httpd
daemon. The daemon will now shut down.
-% STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1
+% STATSHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1
The stats-httpd daemon received a configuration update from the
configuration manager. However, one of the items in the
configuration is unknown. The new configuration is ignored, and an
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 3960c26..b6f0b16 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -24,6 +24,9 @@ The stats module was unable to connect to the BIND 10 command and
control bus. A likely problem is that the message bus daemon
(b10-msgq) is not running. The stats module will now shut down.
+% STATS_EXITING exiting
+The stats module process is exiting.
+
% STATS_RECEIVED_INVALID_STATISTICS_DATA received invalid statistics data from %1
Invalid statistics data has been received from the module while
polling and it has been discarded.
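
The STATSHTTPD_HTTPLOG entry documented above covers per-request HTTP
logging. A minimal sketch of how such a message is typically emitted is
shown below; it is illustrative only and not taken from this commit. It
assumes the handler class overrides http.server's log_message(), that the
generated message module is isc.log_messages.stats_httpd_messages, that
the two placeholders carry the client address and the formatted request
line, and that the debug level value is arbitrary:

    import http.server
    import isc.log
    from isc.log_messages.stats_httpd_messages import STATSHTTPD_HTTPLOG

    isc.log.init("b10-stats-httpd")      # assumed initialization call
    logger = isc.log.Logger("stats-httpd")
    DBG_STATSHTTPD_MESSAGING = 10        # hypothetical debug level

    class HttpHandler(http.server.BaseHTTPRequestHandler):
        def log_message(self, format, *args):
            # Forward http.server's per-request log line to the BIND 10
            # logger instead of writing it to stderr. The placeholder
            # values chosen here are assumptions, not the commit's code.
            logger.debug(DBG_STATSHTTPD_MESSAGING, STATSHTTPD_HTTPLOG,
                         self.address_string(), format % args)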
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index fb0510a..98689c8 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -68,7 +68,7 @@ XMLNS_XSD = "http://www.w3.org/2001/XMLSchema"
XMLNS_XSI = stats_httpd.XMLNS_XSI
DUMMY_DATA = {
- 'Boss' : {
+ 'Init' : {
"boot_time": time.strftime('%Y-%m-%dT%H:%M:%SZ', CONST_BASETIME)
},
'Auth' : {
@@ -278,7 +278,7 @@ class TestHttpHandler(unittest.TestCase):
+ stats_httpd.XSD_URL_PATH)
# check the path of XSL
self.assertTrue(xsl_doctype.startswith(
- '<?xml-stylesheet type="text/xsl" href="' +
+ '<?xml-stylesheet type="text/xsl" href="' +
stats_httpd.XSL_URL_PATH
+ '"?>'))
# check whether the list of 'identifier' attributes in
@@ -397,7 +397,7 @@ class TestHttpHandler(unittest.TestCase):
# 404 NotFound (too long path)
self.client._http_vsn_str = 'HTTP/1.0'
- self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+ self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Init/boot_time/a')
self.client.endheaders()
response = self.client.getresponse()
self.assertEqual(response.status, 404)
@@ -1001,7 +1001,7 @@ class TestStatsHttpd(unittest.TestCase):
self.assertFalse('item_format' in spec)
self.assertFalse('format' in stats_xml[i].attrib)
- @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
+ @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
def test_xsd_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
xsd_string = self.stats_httpd.xsd_handler()
@@ -1036,7 +1036,7 @@ class TestStatsHttpd(unittest.TestCase):
self.assertEqual(attribs[i][1], stats_xsd[i].attrib['type'])
self.assertEqual(attribs[i][2], stats_xsd[i].attrib['use'])
- @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
+ @unittest.skipUnless(xml_parser, "skipping the test using XMLParser")
def test_xsl_handler(self):
self.stats_httpd = MyStatsHttpd(get_availaddr())
xsl_string = self.stats_httpd.xsl_handler()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index d18abf1..80bd3a6 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -319,15 +319,15 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
@@ -335,13 +335,13 @@ class TestStats(unittest.TestCase):
(rcode, value) = send_command('show', 'Stats')
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Stats' in value)
self.assertTrue('Auth' in value)
self.assertEqual(len(value['Stats']), 5)
- self.assertEqual(len(value['Boss']), 1)
- self.assertTrue('boot_time' in value['Boss'])
- self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+ self.assertEqual(len(value['Init']), 1)
+ self.assertTrue('boot_time' in value['Init'])
+ self.assertEqual(value['Init']['boot_time'], self.const_datetime)
self.assertTrue('report_time' in value['Stats'])
self.assertTrue('boot_time' in value['Stats'])
self.assertTrue('last_update_time' in value['Stats'])
@@ -350,12 +350,12 @@ class TestStats(unittest.TestCase):
(rcode, value) = send_command('showschema', 'Stats')
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Stats' in value)
self.assertTrue('Auth' in value)
self.assertEqual(len(value['Stats']), 5)
- self.assertEqual(len(value['Boss']), 1)
- for item in value['Boss']:
+ self.assertEqual(len(value['Init']), 1)
+ for item in value['Init']:
self.assertTrue(len(item) == 7)
self.assertTrue('item_name' in item)
self.assertTrue('item_type' in item)
@@ -383,10 +383,10 @@ class TestStats(unittest.TestCase):
def test_update_modules(self):
self.stats = stats.Stats()
- self.assertEqual(len(self.stats.modules), 3) # Auth, Boss, Stats
+ self.assertEqual(len(self.stats.modules), 3) # Auth, Init, Stats
self.stats.update_modules()
self.assertTrue('Stats' in self.stats.modules)
- self.assertTrue('Boss' in self.stats.modules)
+ self.assertTrue('Init' in self.stats.modules)
self.assertFalse('Dummy' in self.stats.modules)
my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
self.assertTrue('report_time' in my_statistics_data)
@@ -399,7 +399,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
self.assertEqual(my_statistics_data['timestamp'], 0.0)
self.assertEqual(my_statistics_data['lname'], "")
- my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+ my_statistics_data = stats.get_spec_defaults(self.stats.modules['Init'].get_statistics_spec())
self.assertTrue('boot_time' in my_statistics_data)
self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
orig_parse_answer = stats.isc.config.ccsession.parse_answer
@@ -411,8 +411,8 @@ class TestStats(unittest.TestCase):
self.stats = stats.Stats()
my_statistics_data = self.stats.get_statistics_data()
self.assertTrue('Stats' in my_statistics_data)
- self.assertTrue('Boss' in my_statistics_data)
- self.assertTrue('boot_time' in my_statistics_data['Boss'])
+ self.assertTrue('Init' in my_statistics_data)
+ self.assertTrue('boot_time' in my_statistics_data['Init'])
my_statistics_data = self.stats.get_statistics_data(owner='Stats')
self.assertTrue('Stats' in my_statistics_data)
self.assertTrue('report_time' in my_statistics_data['Stats'])
@@ -601,7 +601,7 @@ class TestStats(unittest.TestCase):
['foo1']['nds_queries.perzone'],\
_new_val)
self.assertEqual(self.stats.update_statistics_data(
- 'Boss', 'bar1', _test_exp7), ["KeyError: 'foo'"])
+ 'Init', 'bar1', _test_exp7), ["KeyError: 'foo'"])
self.assertEqual(self.stats.update_statistics_data(
'Foo', 'foo1', _test_exp6), ['unknown module name: Foo'])
@@ -659,7 +659,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(self.stats.statistics_data['Auth']['queries.udp'], sum_qudp)
self.assertTrue('Auth' in self.stats.statistics_data_bymid)
# restore statistics data of killed auth
- # self.base.boss.server.pid_list = [ killed ] + self.base.boss.server.pid_list[:]
+ # self.base.b10_init.server.pid_list = [ killed ] + self.base.b10_init.server.pid_list[:]
self.stats.update_statistics_data('Auth',
"bar1 at foo",
{'queries.tcp': bar1_tcp})
@@ -717,9 +717,9 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command(
'show', 'Stats',
- params={ 'owner' : 'Boss',
+ params={ 'owner' : 'Init',
'name' : 'boot_time' }),
- (0, {'Boss': {'boot_time': self.const_datetime}}))
+ (0, {'Init': {'boot_time': self.const_datetime}}))
stats_server.shutdown()
def test_commands(self):
@@ -833,7 +833,7 @@ class TestStats(unittest.TestCase):
self.assertEqual(rcode, 0)
self.assertEqual(len(value), 3)
self.assertTrue('Stats' in value)
- self.assertTrue('Boss' in value)
+ self.assertTrue('Init' in value)
self.assertTrue('Auth' in value)
self.assertFalse('__Dummy__' in value)
schema = value['Stats']
@@ -849,7 +849,7 @@ class TestStats(unittest.TestCase):
if len(item) == 7:
self.assertTrue('item_format' in item)
- schema = value['Boss']
+ schema = value['Init']
self.assertEqual(len(schema), 1)
for item in schema:
self.assertTrue(len(item) == 7)
@@ -879,7 +879,7 @@ class TestStats(unittest.TestCase):
self.stats.command_showschema(owner='Stats'))
self.assertEqual(rcode, 0)
self.assertTrue('Stats' in value)
- self.assertFalse('Boss' in value)
+ self.assertFalse('Init' in value)
self.assertFalse('Auth' in value)
for item in value['Stats']:
self.assertTrue(len(item) == 6 or len(item) == 7)
@@ -896,7 +896,7 @@ class TestStats(unittest.TestCase):
self.stats.command_showschema(owner='Stats', name='report_time'))
self.assertEqual(rcode, 0)
self.assertTrue('Stats' in value)
- self.assertFalse('Boss' in value)
+ self.assertFalse('Init' in value)
self.assertFalse('Auth' in value)
self.assertEqual(len(value['Stats'][0]), 7)
self.assertTrue('item_name' in value['Stats'][0])
@@ -1150,15 +1150,15 @@ class TestStats(unittest.TestCase):
self.assertEqual(
send_command('show', 'Stats'),
(0, stat.statistics_data))
- # check statistics data of 'Boss'
- boss = self.base.boss.server
+ # check statistics data of 'Init'
+ b10_init = self.base.b10_init.server
self.assertEqual(
- stat.statistics_data_bymid['Boss'][boss.cc_session.lname],
+ stat.statistics_data_bymid['Init'][b10_init.cc_session.lname],
{'boot_time': self.const_datetime})
self.assertEqual(
- len(stat.statistics_data_bymid['Boss']), 1)
+ len(stat.statistics_data_bymid['Init']), 1)
self.assertEqual(
- stat.statistics_data['Boss'],
+ stat.statistics_data['Init'],
{'boot_time': self.const_datetime})
# check statistics data of each 'Auth' instances
list_auth = ['', '2']
@@ -1219,17 +1219,17 @@ class TestStats(unittest.TestCase):
def test_polling2(self):
# set invalid statistics
- boss = self.base.boss.server
- boss.statistics_data = {'boot_time':1}
+ b10_init = self.base.b10_init.server
+ b10_init.statistics_data = {'boot_time':1}
stats_server = ThreadingServerManager(MyStats)
stat = stats_server.server
stats_server.run()
self.assertEqual(
send_command('status', 'Stats'),
(0, "Stats is up. (PID " + str(os.getpid()) + ")"))
- # check default statistics data of 'Boss'
+ # check default statistics data of 'Init'
self.assertEqual(
- stat.statistics_data['Boss'],
+ stat.statistics_data['Init'],
{'boot_time': self.const_default_datetime})
stats_server.shutdown()
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
index 5c1855a..1c5cc3c 100644
--- a/src/bin/stats/tests/test_utils.py
+++ b/src/bin/stats/tests/test_utils.py
@@ -140,11 +140,11 @@ class MockCfgmgr:
def shutdown(self):
self.cfgmgr.running = False
-class MockBoss:
+class MockInit:
spec_str = """\
{
"module_spec": {
- "module_name": "Boss",
+ "module_name": "Init",
"module_description": "Mock Master process",
"config_data": [
{
@@ -210,7 +210,7 @@ class MockBoss:
},
{
"command_name": "ping",
- "command_description": "Ping the boss process",
+ "command_description": "Ping the b10-init process",
"command_args": []
},
{
@@ -537,9 +537,9 @@ class BaseModules:
# MockCfgmgr
self.cfgmgr = ThreadingServerManager(MockCfgmgr)
self.cfgmgr.run()
- # MockBoss
- self.boss = ThreadingServerManager(MockBoss)
- self.boss.run()
+ # MockInit
+ self.b10_init = ThreadingServerManager(MockInit)
+ self.b10_init.run()
# MockAuth
self.auth = ThreadingServerManager(MockAuth)
self.auth.run()
@@ -558,8 +558,8 @@ class BaseModules:
# MockAuth
self.auth2.shutdown(True)
self.auth.shutdown(True)
- # MockBoss
- self.boss.shutdown(True)
+ # MockInit
+ self.b10_init.shutdown(True)
# MockCfgmgr
self.cfgmgr.shutdown(True)
# remove the unused socket file
diff --git a/src/bin/stats/tests/testdata/b10-config.db b/src/bin/stats/tests/testdata/b10-config.db
index 2f89b98..7dd9daf 100644
--- a/src/bin/stats/tests/testdata/b10-config.db
+++ b/src/bin/stats/tests/testdata/b10-config.db
@@ -1,5 +1,5 @@
{ "version": 2,
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {
"kind": "needed",
diff --git a/src/bin/sysinfo/run_sysinfo.sh.in b/src/bin/sysinfo/run_sysinfo.sh.in
index 6459c2d..b5593b9 100755
--- a/src/bin/sysinfo/run_sysinfo.sh.in
+++ b/src/bin/sysinfo/run_sysinfo.sh.in
@@ -20,20 +20,8 @@ export PYTHON_EXEC
SYSINFO_PATH=@abs_top_builddir@/src/bin/sysinfo
-# Note: we shouldn't need log_messages and lib/dns except for the seemingly
-# necessary dependency due to the automatic import in the isc package (its
-# __init__.py imports some other modules)
-# #2145 should eliminate the need for them.
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/dns/python/.libs
+PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_srcdir@/src/lib/python
export PYTHONPATH
-# Likewise, we need only because isc.log requires some loadable modules.
-# sysinfo itself shouldn't need any of them.
-SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
-if test $SET_ENV_LIBRARY_PATH = yes; then
- @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
- export @ENV_LIBRARY_PATH@
-fi
-
cd ${SYSINFO_PATH}
exec ${PYTHON_EXEC} -O sysinfo.py "$@"
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 055ebdc..ea8ad87 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -42,7 +42,7 @@ class TestRename(unittest.TestCase):
# Scripts named in this list are not expected to be renamed and
# should be excluded from the scan.
- EXCLUDED_SCRIPTS = ['isc-sysinfo']
+ EXCLUDED_SCRIPTS = ['isc-sysinfo', 'bind10']
# Regexp to find all the *_SCRIPTS = something lines (except for
# noinst_SCRIPTS, which are scripts for tests), including line
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index eb16ab3..0f3e99c 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -56,8 +56,8 @@
<para>The <command>b10-xfrin</command> daemon provides the BIND 10
incoming DNS zone transfer service.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
When triggered it can request and receive a zone transfer and store
the zone in a BIND 10 zone data source.
</para>
@@ -180,8 +180,8 @@ in separate zonemgr process.
<para>
<command>shutdown</command> stops all incoming zone transfers
- and exits <command>b10-xfrin</command>. (Note that the BIND 10
- boss process will restart this service.)
+ and exits <command>b10-xfrin</command>. (Note that the
+ b10-init process will restart this service.)
</para>
<!-- TODO:
add a usage example of xfrin -->
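
The xfrin_test.py hunks below convert the isc.dns Python API usage from
factory-style calls (RRType.SOA(), RRClass.IN(), Rcode.NOERROR(),
Opcode.QUERY()) to class-level constants (RRType.SOA, RRClass.IN, and so
on). A minimal sketch of building an SOA RRset with the constant style,
using only constructs that appear in the updated tests:

    from isc.dns import Name, RRClass, RRType, RRTTL, RRset, Rdata

    # Construct an SOA RRset; type and class are class-level constants,
    # so no call parentheses are needed.
    soa = RRset(Name("example.com."), RRClass.IN, RRType.SOA, RRTTL(3600))
    soa.add_rdata(Rdata(RRType.SOA, RRClass.IN,
                        "master.example.com. admin.example.com. "
                        "1234 3600 1800 2419200 7200"))

    # The constants compare directly with values returned by the API.
    assert soa.get_type() == RRType.SOA
    assert soa.get_class() == RRClass.IN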
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 99c5e1e..a1714de 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -40,7 +40,7 @@ import sqlite3
#
TEST_ZONE_NAME_STR = "example.com."
TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_RRCLASS_STR = 'IN'
TEST_DB_FILE = 'db_file'
TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
@@ -59,21 +59,21 @@ TEST_MASTER_PORT = '53535'
TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
# SOA intended to be used for the new SOA as a result of transfer.
-soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'master.example.com. admin.example.com. ' +
'1234 3600 1800 2419200 7200')
-soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA, RRTTL(3600))
soa_rrset.add_rdata(soa_rdata)
# SOA intended to be used for the current SOA at the secondary side.
# Note that its serial is smaller than that of soa_rdata.
-begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+begin_soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'master.example.com. admin.example.com. ' +
'1230 3600 1800 2419200 7200')
-begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA, RRTTL(3600))
begin_soa_rrset.add_rdata(begin_soa_rdata)
-example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
-example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA)
default_questions = [example_axfr_question]
default_answers = [soa_rrset]
@@ -153,13 +153,19 @@ class MockCC(MockModuleCCSession):
def remove_remote_config(self, module_name):
pass
+class MockRRsetCollection:
+ '''
+ A mock RRset collection. We don't use it really (we mock the method that
+ it is passed to too), so it's empty.
+ '''
+ pass
+
class MockDataSourceClient():
'''A simple mock data source client.
This class provides a minimal set of wrappers related the data source
API that would be used by Diff objects. For our testing purposes they
- only keep truck of the history of the changes.
-
+ only keep track of the history of the changes.
'''
def __init__(self):
self.force_fail = False # if True, raise an exception on commit
@@ -202,12 +208,12 @@ class MockDataSourceClient():
zone names.
'''
- if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+ if name == TEST_ZONE_NAME and rrtype == RRType.SOA:
return (ZoneFinder.SUCCESS, begin_soa_rrset, 0)
if name == Name('no-soa.example'):
return (ZoneFinder.NXDOMAIN, None, 0)
if name == Name('dup-soa.example'):
- dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+ dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA, RRTTL(0))
dup_soa_rrset.add_rdata(begin_soa_rdata)
dup_soa_rrset.add_rdata(soa_rdata)
return (ZoneFinder.SUCCESS, dup_soa_rrset, 0)
@@ -217,6 +223,12 @@ class MockDataSourceClient():
self._journaling_enabled = journaling
return self
+ def get_rrset_collection(self):
+ '''
+ Pretend to be a zone updater and provide a (dummy) rrset collection.
+ '''
+ return MockRRsetCollection()
+
def add_rrset(self, rrset):
self.diffs.append(('add', rrset))
@@ -317,7 +329,7 @@ class MockXfrinConnection(XfrinConnection):
return len(data)
def create_response_data(self, response=True, auth=True, bad_qid=False,
- rcode=Rcode.NOERROR(),
+ rcode=Rcode.NOERROR,
questions=default_questions,
answers=default_answers,
authorities=[],
@@ -327,7 +339,7 @@ class MockXfrinConnection(XfrinConnection):
if bad_qid:
qid += 1
resp.set_qid(qid)
- resp.set_opcode(Opcode.QUERY())
+ resp.set_opcode(Opcode.QUERY)
resp.set_rcode(rcode)
if response:
resp.set_header_flag(Message.HEADERFLAG_QR)
@@ -354,17 +366,17 @@ class TestXfrinState(unittest.TestCase):
TEST_RRCLASS, None, threading.Event(),
TEST_MASTER_IPV4_ADDRINFO)
self.conn.init_socket()
- self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
- self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+ self.begin_soa.add_rdata(Rdata(RRType.SOA, TEST_RRCLASS,
'm. r. 1230 0 0 0 0'))
- self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+ self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS,
RRTTL(3600))
- self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
- 'ns.example.com'))
- self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+ self.ns_rrset.add_rdata(Rdata(RRType.NS, TEST_RRCLASS,
+ 'ns.example.com.'))
+ self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A,
RRTTL(3600))
- self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+ self.a_rrset.add_rdata(Rdata(RRType.A, TEST_RRCLASS, '192.0.2.1'))
self.conn._datasrc_client = MockDataSourceClient()
self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
@@ -396,14 +408,14 @@ class TestXfrinInitialSOA(TestXfrinState):
self.ns_rrset)
def test_handle_ixfr_uptodate(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinIXFRUptodate()),
type(self.conn.get_xfrstate()))
def test_handle_ixfr_uptodate2(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinIXFRUptodate()),
@@ -412,7 +424,7 @@ class TestXfrinInitialSOA(TestXfrinState):
def test_handle_ixfr_uptodate3(self):
# Similar to the previous case, but checking serial number arithmetic
# comparison
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._request_serial = isc.dns.Serial(0xffffffff)
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinFirstData()),
@@ -420,7 +432,7 @@ class TestXfrinInitialSOA(TestXfrinState):
def test_handle_axfr_uptodate(self):
# "request serial" should matter only for IXFR
- self.conn._request_type = RRType.AXFR()
+ self.conn._request_type = RRType.AXFR
self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
self.assertEqual(type(XfrinFirstData()),
@@ -433,13 +445,13 @@ class TestXfrinFirstData(TestXfrinState):
def setUp(self):
super().setUp()
self.state = XfrinFirstData()
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
# arbitrary chosen serial < 1234:
self.conn._request_serial = isc.dns.Serial(1230)
self.conn._diff = None # should be replaced in the AXFR case
def test_handle_ixfr_begin_soa(self):
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
self.assertEqual(type(XfrinIXFRDeleteSOA()),
type(self.conn.get_xfrstate()))
@@ -447,7 +459,7 @@ class TestXfrinFirstData(TestXfrinState):
def test_handle_axfr(self):
# If the original type is AXFR, other conditions aren't considered,
# and AXFR processing will continue
- self.conn._request_type = RRType.AXFR()
+ self.conn._request_type = RRType.AXFR
self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
@@ -586,9 +598,9 @@ class TestXfrinIXFRAdd(TestXfrinState):
# First, push a starting SOA inside. This should be OK, nothing checked
# yet.
self.state.handle_rr(self.conn, self.begin_soa)
- end_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+ end_soa_rdata = Rdata(RRType.SOA, TEST_RRCLASS,
'm. r. 1234 0 0 0 0')
- end_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ end_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
end_soa_rrset.add_rdata(end_soa_rdata)
# This would try to finish up. But the TSIG pretends not everything is
@@ -712,7 +724,7 @@ class TestXfrinConnection(unittest.TestCase):
'bad_qid': False,
'response': True,
'auth': True,
- 'rcode': Rcode.NOERROR(),
+ 'rcode': Rcode.NOERROR,
'answers': default_answers,
'authorities': [],
'tsig': False,
@@ -726,11 +738,27 @@ class TestXfrinConnection(unittest.TestCase):
'tsig_1st': None,
'tsig_2nd': None
}
+ self.__orig_check_zone = xfrin.check_zone
+ xfrin.check_zone = self.__check_zone
+ self._check_zone_result = True
+ self._check_zone_params = None
def tearDown(self):
self.conn.close()
if os.path.exists(TEST_DB_FILE):
os.remove(TEST_DB_FILE)
+ xfrin.check_zone = self.__orig_check_zone
+
+ def __check_zone(self, name, rrclass, rrsets, callbacks):
+ '''
+ A mock function used instead of dns.check_zone.
+ '''
+ self._check_zone_params = (name, rrclass, rrsets, callbacks)
+ # Call both callbacks to see they do nothing. This checks
+ # the transfer depends on the result only.
+ callbacks[0]("Test error")
+ callbacks[1]("Test warning")
+ return self._check_zone_result
def _create_normal_response_data(self):
# This helper method creates a simple sequence of DNS messages that
@@ -789,21 +817,21 @@ class TestXfrinConnection(unittest.TestCase):
self.conn.reply_data += bogus_data
def _create_a(self, address):
- rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+ rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A,
RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+ rrset.add_rdata(Rdata(RRType.A, TEST_RRCLASS, address))
return rrset
def _create_soa(self, serial):
- rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA,
RRTTL(3600))
rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
- rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+ rrset.add_rdata(Rdata(RRType.SOA, TEST_RRCLASS, rdata_str))
return rrset
def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
- rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+ rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS, RRTTL(3600))
+ rrset.add_rdata(Rdata(RRType.NS, TEST_RRCLASS, nsname))
return rrset
def _set_test_zone(self, zone_name):
@@ -825,6 +853,7 @@ class TestAXFR(TestXfrinConnection):
def tearDown(self):
time.time = self.orig_time_time
+ super().tearDown()
def __create_mock_tsig(self, key, error, has_last_signature=True):
# This helper function creates a MockTSIGContext for a given key
@@ -870,19 +899,19 @@ class TestAXFR(TestXfrinConnection):
c.close()
def test_init_chclass(self):
- c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(), None,
+ c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH, None,
threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
c.init_socket()
- axfrmsg = c._create_query(RRType.AXFR())
+ axfrmsg = c._create_query(RRType.AXFR)
self.assertEqual(axfrmsg.get_question()[0].get_class(),
- RRClass.CH())
+ RRClass.CH)
c.close()
def test_create_query(self):
def check_query(expected_qtype, expected_auth):
'''Helper method to repeat the same pattern of tests'''
- self.assertEqual(Opcode.QUERY(), msg.get_opcode())
- self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+ self.assertEqual(Opcode.QUERY, msg.get_opcode())
+ self.assertEqual(Rcode.NOERROR, msg.get_rcode())
self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
@@ -907,16 +936,16 @@ class TestAXFR(TestXfrinConnection):
# Actual tests start here
# SOA query
- msg = self.conn._create_query(RRType.SOA())
- check_query(RRType.SOA(), None)
+ msg = self.conn._create_query(RRType.SOA)
+ check_query(RRType.SOA, None)
# AXFR query
- msg = self.conn._create_query(RRType.AXFR())
- check_query(RRType.AXFR(), None)
+ msg = self.conn._create_query(RRType.AXFR)
+ check_query(RRType.AXFR, None)
# IXFR query
- msg = self.conn._create_query(RRType.IXFR())
- check_query(RRType.IXFR(), begin_soa_rrset)
+ msg = self.conn._create_query(RRType.IXFR)
+ check_query(RRType.IXFR, begin_soa_rrset)
self.assertEqual(1230, self.conn._request_serial.get_value())
def test_create_ixfr_query_fail(self):
@@ -925,20 +954,20 @@ class TestAXFR(TestXfrinConnection):
self._set_test_zone(Name('no-such-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('partial-match-zone.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('no-soa.example'))
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
self._set_test_zone(Name('dup-soa.example'))
self.conn._zone_soa = self.conn._get_zone_soa()
self.assertRaises(XfrinException, self.conn._create_query,
- RRType.IXFR())
+ RRType.IXFR)
def test_send_query(self):
def message_has_tsig(data):
@@ -951,11 +980,11 @@ class TestAXFR(TestXfrinConnection):
# soa request with tsig
self.conn._tsig_key = TSIG_KEY
- self.conn._send_query(RRType.SOA())
+ self.conn._send_query(RRType.SOA)
self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
# axfr request with tsig
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertTrue(message_has_tsig(self.conn.query_data[2:]))
def test_response_with_invalid_msg(self):
@@ -966,14 +995,14 @@ class TestAXFR(TestXfrinConnection):
def test_response_with_tsigfail(self):
self.conn._tsig_key = TSIG_KEY
# server tsig check fail, return with RCODE 9 (NOTAUTH)
- self.conn._send_query(RRType.SOA())
+ self.conn._send_query(RRType.SOA)
self.conn.reply_data = \
- self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+ self.conn.create_response_data(rcode=Rcode.NOTAUTH)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_without_end_soa(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data()
# This should result in timeout in the asyncore loop. We emulate
# that situation in recv() by emptying the reply data buffer.
@@ -981,7 +1010,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._handle_xfrin_responses)
def test_response_bad_qid(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -990,9 +1019,9 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- rcode=Rcode.SERVFAIL())
+ rcode=Rcode.SERVFAIL)
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
self.__match_exception(XfrinProtocolError,
@@ -1003,7 +1032,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_KEY)
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
# xfrin should check TSIG before other part of incoming message
# validate log message for XfrinException
@@ -1012,26 +1041,26 @@ class TestAXFR(TestXfrinConnection):
self.conn._handle_xfrin_responses)
def test_response_non_response(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(response=False)
self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_error_code(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- rcode=Rcode.SERVFAIL())
+ rcode=Rcode.SERVFAIL)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_multi_question(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
questions=[example_axfr_question, example_axfr_question])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
def test_response_non_response(self):
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(response = False)
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1069,7 +1098,7 @@ class TestAXFR(TestXfrinConnection):
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_error_code(self):
- self.soa_response_params['rcode'] = Rcode.SERVFAIL()
+ self.soa_response_params['rcode'] = Rcode.SERVFAIL
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1117,21 +1146,21 @@ class TestAXFR(TestXfrinConnection):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(Name('example.org'),
TEST_RRCLASS,
- RRType.SOA())]
+ RRType.SOA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_question_class_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
- RRClass.CH(),
- RRType.SOA())]
+ RRClass.CH,
+ RRType.SOA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_question_type_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
TEST_RRCLASS,
- RRType.AAAA())]
+ RRType.AAAA)]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_no_soa(self):
@@ -1149,8 +1178,8 @@ class TestAXFR(TestXfrinConnection):
def test_soacheck_soa_class_mismatch(self):
self.conn.response_generator = self._create_soa_response_data
- soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+ soa = RRset(TEST_ZONE_NAME, RRClass.CH, RRType.SOA, RRTTL(0))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.CH, 'm. r. 1234 0 0 0 0'))
self.soa_response_params['answers'] = [soa]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1168,7 +1197,7 @@ class TestAXFR(TestXfrinConnection):
def test_soacheck_referral_response(self):
self.conn.response_generator = self._create_soa_response_data
self.soa_response_params['answers'] = []
- self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+ self.soa_response_params['authorities'] = [create_ns('ns.example.com.')]
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
def test_soacheck_nodata_response(self):
@@ -1191,7 +1220,7 @@ class TestAXFR(TestXfrinConnection):
self.conn._tsig_key = TSIG_KEY
self.conn._tsig_ctx_creator = \
lambda key: self.__create_mock_tsig(key, TSIGError.BAD_SIG)
- self.soa_response_params['rcode'] = Rcode.NOTAUTH()
+ self.soa_response_params['rcode'] = Rcode.NOTAUTH
self.conn.response_generator = self._create_soa_response_data
self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
@@ -1228,7 +1257,7 @@ class TestAXFR(TestXfrinConnection):
def test_response_shutdown(self):
self.conn.response_generator = self._create_normal_response_data
self.conn._shutdown_event.set()
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
def test_response_timeout(self):
@@ -1243,13 +1272,13 @@ class TestAXFR(TestXfrinConnection):
def test_response_bad_message(self):
self.conn.response_generator = self._create_broken_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.assertRaises(Exception, self.conn._handle_xfrin_responses)
def test_axfr_response(self):
# A simple normal case: AXFR consists of SOA, NS, then trailing SOA.
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1270,7 +1299,7 @@ class TestAXFR(TestXfrinConnection):
self._create_ns(),
soa_rrset]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1285,10 +1314,9 @@ class TestAXFR(TestXfrinConnection):
'''
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)],
# begin serial=1230, end serial=1234. end will be used.
answers=[begin_soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
@@ -1297,6 +1325,32 @@ class TestAXFR(TestXfrinConnection):
[[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
self.conn._datasrc_client.committed_diffs)
+ def test_axfr_response_fail_validation(self):
+ """
+ Test we reject a zone transfer if it fails the check_zone validation.
+ """
+ a_rr = self._create_a('192.0.2.1')
+ self.conn._send_query(RRType.AXFR)
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR)],
+ # begin serial=1230, end serial=1234. end will be used.
+ answers=[begin_soa_rrset, a_rr, soa_rrset])
+ # Make it fail the validation
+ self._check_zone_result = False
+ self.assertRaises(XfrinZoneError, self.conn._handle_xfrin_responses)
+ self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+ # Check the validation is called with the correct parameters
+ self.assertEqual(TEST_ZONE_NAME, self._check_zone_params[0])
+ self.assertEqual(TEST_RRCLASS, self._check_zone_params[1])
+ self.assertTrue(isinstance(self._check_zone_params[2],
+ MockRRsetCollection))
+ # Check we can safely call the callbacks. They have no sideeffects
+ # we can check (checking logging is hard), but we at least check
+ # they don't crash.
+ self._check_zone_params[3][0]("Test error")
+ self._check_zone_params[3][1]("Test warning")
+
def test_axfr_response_extra(self):
'''Test with an extra RR after the end of AXFR session.
@@ -1305,10 +1359,10 @@ class TestAXFR(TestXfrinConnection):
'''
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ RRType.AXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1322,9 +1376,9 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = \
- [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR())]
+ [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1338,9 +1392,9 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = \
- [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.AXFR())]
+ [Question(TEST_ZONE_NAME, RRClass.CH, RRType.AXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1355,9 +1409,9 @@ class TestAXFR(TestXfrinConnection):
'''
# returning IXFR in question to AXFR query
self.axfr_response_params['question_1st'] = \
- [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.IXFR())]
+ [Question(TEST_ZONE_NAME, RRClass.CH, RRType.IXFR)]
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1372,7 +1426,7 @@ class TestAXFR(TestXfrinConnection):
'''
self.axfr_response_params['question_1st'] = []
self.conn.response_generator = self._create_normal_response_data
- self.conn._send_query(RRType.AXFR())
+ self.conn._send_query(RRType.AXFR)
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
check_diffs(self.assertEqual,
@@ -1499,6 +1553,15 @@ class TestAXFR(TestXfrinConnection):
self.conn.response_generator = self._create_normal_response_data
self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+ def test_do_xfrin_invalid_zone(self):
+ """
+ Test receiving an invalid zone. We mock the check and see the whole
+ transfer is rejected.
+ """
+ self._check_zone_result = False
+ self.conn.response_generator = self._create_normal_response_data
+ self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
+
def test_do_soacheck_and_xfrin(self):
self.conn.response_generator = self._create_soa_response_data
self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
@@ -1552,7 +1615,7 @@ class TestIXFRResponse(TestXfrinConnection):
super().setUp()
self.conn._query_id = self.conn.qid = 1035
self.conn._request_serial = isc.dns.Serial(1230)
- self.conn._request_type = RRType.IXFR()
+ self.conn._request_type = RRType.IXFR
self.conn._datasrc_client = MockDataSourceClient()
XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
@@ -1566,7 +1629,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1576,12 +1639,32 @@ class TestIXFRResponse(TestXfrinConnection):
[[('delete', begin_soa_rrset), ('add', soa_rrset)]],
self.conn._datasrc_client.committed_diffs)
+ def test_ixfr_response_fail_validation(self):
+ '''
+ An IXFR that fails validation later on. Check it is rejected.
+ '''
+ self.conn.reply_data = self.conn.create_response_data(
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
+ answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+ self._check_zone_result = False
+ self.assertRaises(XfrinZoneError, self.conn._handle_xfrin_responses)
+ self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+ self.assertEqual(TEST_ZONE_NAME, self._check_zone_params[0])
+ self.assertEqual(TEST_RRCLASS, self._check_zone_params[1])
+ self.assertTrue(isinstance(self._check_zone_params[2],
+ MockRRsetCollection))
+ # Check we can safely call the callbacks. They have no sideeffects
+ # we can check (checking logging is hard), but we at least check
+ # they don't crash.
+ self._check_zone_params[3][0]("Test error")
+ self._check_zone_params[3][1]("Test warning")
+
def test_ixfr_response_multi_sequences(self):
'''Similar to the previous case, but with multiple diff seqs.
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset,
# removing one A in serial 1230
begin_soa_rrset, self._create_a('192.0.2.1'),
@@ -1621,10 +1704,10 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset])
self.conn.reply_data += self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1635,7 +1718,7 @@ class TestIXFRResponse(TestXfrinConnection):
def test_ixfr_response_uptodate(self):
'''IXFR response indicates the zone is new enough'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[begin_soa_rrset])
self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
# no diffs should have been committed
@@ -1648,7 +1731,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
# SOA sequence is out-of-sync
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.assertRaises(XfrinProtocolError,
@@ -1665,7 +1748,7 @@ class TestIXFRResponse(TestXfrinConnection):
specification, but it is how BIND 9 works and we do the same.
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
self._create_a('192.0.2.1')])
self.assertRaises(XfrinProtocolError,
@@ -1682,7 +1765,7 @@ class TestIXFRResponse(TestXfrinConnection):
'''
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[begin_soa_rrset, soa_rrset])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1699,7 +1782,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
@@ -1721,7 +1804,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
self.conn._handle_xfrin_responses()
self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
@@ -1740,7 +1823,7 @@ class TestIXFRResponse(TestXfrinConnection):
ns_rr = self._create_ns()
a_rr = self._create_a('192.0.2.1')
self.conn.reply_data = self.conn.create_response_data(
- questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+ questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR)],
answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
self.assertRaises(XfrinProtocolError,
self.conn._handle_xfrin_responses)
@@ -1767,10 +1850,10 @@ class TestIXFRSession(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn.response_generator = create_ixfr_response
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
# Check some details of the IXFR protocol processing
self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -1784,7 +1867,7 @@ class TestIXFRSession(TestXfrinConnection):
qmsg.from_wire(qdata, len(qdata))
self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
- self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+ self.assertEqual(RRType.IXFR, qmsg.get_question()[0].get_type())
self.assertEqual(1, self.conn._transfer_stats.message_count)
self.assertEqual(0, self.conn._transfer_stats.axfr_rr_count)
@@ -1801,18 +1884,18 @@ class TestIXFRSession(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.conn.response_generator = create_ixfr_response
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
def test_do_xfrin_fail2(self):
'''IXFR fails due to a bogus DNS message.
'''
self._create_broken_response_data()
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
def test_do_xfrin_uptodate(self):
'''IXFR is (gracefully) aborted because serial is not new
@@ -1821,10 +1904,10 @@ class TestIXFRSession(TestXfrinConnection):
def create_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[begin_soa_rrset])
self.conn.response_generator = create_response
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1, self.conn._transfer_stats.message_count)
self.assertEqual(0, self.conn._transfer_stats.axfr_rr_count)
@@ -1871,7 +1954,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def get_zone_serial(self):
result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
self.assertEqual(DataSourceClient.SUCCESS, result)
- result, soa, _ = finder.find(TEST_ZONE_NAME, RRType.SOA())
+ result, soa, _ = finder.find(TEST_ZONE_NAME, RRType.SOA)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual(1, soa.get_rdata_count())
return get_soa_serial(soa.get_rdata()[0])
@@ -1886,13 +1969,13 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
self.conn.response_generator = create_ixfr_response
# Confirm xfrin succeeds and SOA is updated
self.assertEqual(1230, self.get_zone_serial().get_value())
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1234, self.get_zone_serial().get_value())
# Also confirm the corresponding diffs are stored in the diffs table
@@ -1917,18 +2000,18 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_ixfr_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.IXFR())],
+ RRType.IXFR)],
answers=[soa_rrset, begin_soa_rrset, soa_rrset,
self._create_soa('1235')])
self.conn.response_generator = create_ixfr_response
self.assertEqual(1230, self.get_zone_serial().get_value())
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
self.assertEqual(1230, self.get_zone_serial().get_value())
def test_do_ixfrin_nozone_sqlite3(self):
self._set_test_zone(Name('nosuchzone.example'))
- self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+ self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR))
# This should fail even before starting state transition
self.assertEqual(None, self.conn.get_xfrstate())
@@ -1945,23 +2028,23 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
# Confirm xfrin succeeds and SOA is updated, A RR is deleted.
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
def test_do_ixfrin_axfr_sqlite3(self):
'''AXFR-style IXFR.
'''
- self.axfr_check(RRType.IXFR())
+ self.axfr_check(RRType.IXFR)
def test_do_axfrin_sqlite3(self):
'''AXFR.
'''
- self.axfr_check(RRType.AXFR())
+ self.axfr_check(RRType.AXFR)
def axfr_failure_check(self, type):
'''Similar to the previous two tests, but xfrin fails due to error.
@@ -1977,23 +2060,23 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
self.assertEqual(1230, self.get_zone_serial().get_value())
self.assertTrue(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
def test_do_xfrin_axfr_sqlite3_fail(self):
'''Failure case for AXFR-style IXFR.
'''
- self.axfr_failure_check(RRType.IXFR())
+ self.axfr_failure_check(RRType.IXFR)
def test_do_axfrin_sqlite3_fail(self):
'''Failure case for AXFR.
'''
- self.axfr_failure_check(RRType.AXFR())
+ self.axfr_failure_check(RRType.AXFR)
def test_do_axfrin_nozone_sqlite3(self):
'''AXFR test with an empty SQLite3 DB file, thus no target zone there.
@@ -2010,16 +2093,16 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
def create_response():
self.conn.reply_data = self.conn.create_response_data(
questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.AXFR())],
+ RRType.AXFR)],
answers=[soa_rrset, self._create_ns(), soa_rrset])
self.conn.response_generator = create_response
self._set_test_zone(Name('example.com'))
- self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
+ self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR))
self.assertEqual(type(XfrinAXFREnd()),
type(self.conn.get_xfrstate()))
self.assertEqual(1234, self.get_zone_serial().get_value())
self.assertFalse(self.record_exist(Name('dns01.example.com'),
- RRType.A()))
+ RRType.A))
class TestXfrinRecorder(unittest.TestCase):
def setUp(self):
@@ -2128,7 +2211,7 @@ class TestXfrinProcess(unittest.TestCase):
# Normal, successful case. We only check that things are cleaned up
# at the tearDown time.
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_connect(self):
@@ -2136,7 +2219,7 @@ class TestXfrinProcess(unittest.TestCase):
# cleaned up.
self.do_raise_on_connect = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_close(self):
@@ -2146,7 +2229,7 @@ class TestXfrinProcess(unittest.TestCase):
self.do_raise_on_connect = True
self.do_raise_on_close = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
def test_process_xfrin_exception_on_publish(self):
@@ -2154,7 +2237,7 @@ class TestXfrinProcess(unittest.TestCase):
# everything must still be cleaned up.
self.do_raise_on_publish = True
process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
- self.master, False, None, RRType.AXFR(),
+ self.master, False, None, RRType.AXFR,
self.create_xfrinconn)
class TestXfrin(unittest.TestCase):
@@ -2207,7 +2290,7 @@ class TestXfrin(unittest.TestCase):
def test_parse_cmd_params_chclass(self):
self.args['zone_class'] = 'CH'
- self.assertEqual(self._do_parse_zone_name_class()[1], RRClass.CH())
+ self.assertEqual(self._do_parse_zone_name_class()[1], RRClass.CH)
def test_parse_cmd_params_bogusclass(self):
self.args['zone_class'] = 'XXX'
@@ -2254,7 +2337,7 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
# By default we use AXFR (for now)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_short_command1(self):
# try it when only specifying the zone name (of unknown zone)
@@ -2368,7 +2451,7 @@ class TestXfrin(unittest.TestCase):
self.assertEqual(int(TEST_MASTER_PORT),
self.xfr.xfrin_started_master_port)
# By default we use AXFR (for now)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_notify(self):
# at this level, refresh is no different than retransfer.
@@ -2435,7 +2518,7 @@ class TestXfrin(unittest.TestCase):
self.xfr._max_transfers_in)
for zone_config in config_given['zones']:
zone_name = zone_config['name']
- zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN())
+ zone_info = self.xfr._get_zone_info(Name(zone_name), RRClass.IN)
self.assertEqual(str(zone_info.master_addr), zone_config['master_addr'])
self.assertEqual(zone_info.master_port, zone_config['master_port'])
if 'tsig_key' in zone_config:
@@ -2610,16 +2693,16 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_retransfer_ixfr_enabled(self):
# If IXFR is explicitly enabled in config, IXFR will be used
self.common_ixfr_setup('retransfer', True)
- self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.IXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_ixfr_enabled(self):
# Same for refresh
self.common_ixfr_setup('refresh', True)
- self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.IXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_with_tsig(self):
self.common_ixfr_setup('retransfer', False, 'example.com.key')
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_retransfer_with_tsig_bad_key(self):
# bad keys should not reach xfrin, but should they somehow,
@@ -2633,7 +2716,7 @@ class TestXfrin(unittest.TestCase):
def test_command_handler_refresh_with_tsig(self):
self.common_ixfr_setup('refresh', False, 'example.com.key')
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_with_tsig_bad_key(self):
# bad keys should not reach xfrin, but should they somehow,
@@ -2649,12 +2732,12 @@ class TestXfrin(unittest.TestCase):
# Similar to the previous case, but explicitly disabled. AXFR should
# be used.
self.common_ixfr_setup('retransfer', False)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
def test_command_handler_refresh_ixfr_disabled(self):
# Same for refresh
self.common_ixfr_setup('refresh', False)
- self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+ self.assertEqual(RRType.AXFR, self.xfr.xfrin_started_request_type)
class TestXfrinMemoryZones(unittest.TestCase):
def setUp(self):
@@ -2930,7 +3013,7 @@ class TestXfrinProcess(unittest.TestCase):
self.__rets = rets
published = rets[-1]
xfrin.process_xfrin(self, XfrinRecorder(), Name("example.org."),
- RRClass.IN(), None, None, None, True, None,
+ RRClass.IN, None, None, None, True, None,
request_type, self.__get_connection)
self.assertEqual([], self.__rets)
self.assertEqual(transfers, self.__transfers)
@@ -2942,7 +3025,7 @@ class TestXfrinProcess(unittest.TestCase):
"""
Everything OK the first time, over IXFR.
"""
- self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+ self.__do_test([XFRIN_OK], [RRType.IXFR], RRType.IXFR)
# Check there was loadzone command
self.assertTrue(self._send_cc_session.send_called)
self.assertTrue(self._send_cc_session.send_called_correctly)
@@ -2953,7 +3036,7 @@ class TestXfrinProcess(unittest.TestCase):
"""
Everything OK the first time, over AXFR.
"""
- self.__do_test([XFRIN_OK], [RRType.AXFR()], RRType.AXFR())
+ self.__do_test([XFRIN_OK], [RRType.AXFR], RRType.AXFR)
def test_axfr_fail(self):
"""
@@ -2961,15 +3044,15 @@ class TestXfrinProcess(unittest.TestCase):
to fail on AXFR, but succeed on IXFR and we didn't use IXFR in the first
place for some reason.
"""
- self.__do_test([XFRIN_FAIL], [RRType.AXFR()], RRType.AXFR())
+ self.__do_test([XFRIN_FAIL], [RRType.AXFR], RRType.AXFR)
def test_ixfr_fallback(self):
"""
The transfer fails over IXFR, but succeeds over AXFR. It should fall back
to it and say everything is OK.
"""
- self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR(), RRType.AXFR()],
- RRType.IXFR())
+ self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR, RRType.AXFR],
+ RRType.IXFR)
def test_ixfr_fail(self):
"""
@@ -2977,13 +3060,13 @@ class TestXfrinProcess(unittest.TestCase):
(only once) and should try both before giving up.
"""
self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
- [RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+ [RRType.IXFR, RRType.AXFR], RRType.IXFR)
def test_send_loadzone(self):
"""
Check the loadzone command is sent after successful transfer.
"""
- self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+ self.__do_test([XFRIN_OK], [RRType.IXFR], RRType.IXFR)
self.assertTrue(self._send_cc_session.send_called)
self.assertTrue(self._send_cc_session.send_called_correctly)
self.assertTrue(self._send_cc_session.recv_called)
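The bulk of the test changes above reflect a single API shift in the Python DNS binding: RRType, RRClass, Rcode and Opcode values appear as class-level constants rather than factory-style calls. A minimal sketch of the old and new spellings, using a hypothetical MockRRType stand-in for illustration only (not the real isc.dns class):

# Hypothetical stand-in for the real RRType binding, for illustration only.
class MockRRType:
    # Numeric values follow the IANA RR type registry.
    SOA = 6
    IXFR = 251
    AXFR = 252

# Old spelling (before this branch): a factory call returned a new object.
#     qtype = RRType.AXFR()
# New spelling (this branch): the constant is referenced directly.
qtype = MockRRType.AXFR
assert qtype == MockRRType.AXFR
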
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index da0f207..55d9818 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -36,6 +36,7 @@ from isc.xfrin.diff import Diff
from isc.server_common.auth_command import auth_loadzone_command
from isc.server_common.tsig_keyring import init_keyring, get_keyring
from isc.log_messages.xfrin_messages import *
+from isc.dns import *
isc.log.init("b10-xfrin", buffer=True)
logger = isc.log.Logger("xfrin")
@@ -45,13 +46,6 @@ logger = isc.log.Logger("xfrin")
DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
-try:
- from pydnspp import *
-except ImportError as e:
- # C++ loadable module may not be installed; even so the xfrin process
- # must keep running, so we warn about it and move forward.
- logger.error(XFRIN_IMPORT_DNS, str(e))
-
isc.util.process.rename()
# If B10_FROM_BUILD is set in the environment, we use data files
@@ -84,7 +78,7 @@ DBG_XFRIN_TRACE = logger.DBGLVL_TRACE_BASIC
# (TODO: have similar support to get default values for command
# arguments as we do for config options)
DEFAULT_MASTER_PORT = 53
-DEFAULT_ZONE_CLASS = RRClass.IN()
+DEFAULT_ZONE_CLASS = RRClass.IN
__version__ = 'BIND10'
@@ -100,8 +94,17 @@ class XfrinProtocolError(Exception):
'''
pass
+class XfrinZoneError(Exception):
+ '''
+ An exception raised when the received zone is broken enough to be unusable.
+ '''
+ pass
+
class XfrinZoneUptodate(Exception):
- '''TBD
+ '''
+ Thrown when the zone is already up to date, so there's no need to download
+ the zone. This is not really an error case (but it's still an exceptional
+ condition, and the control flow differs from the usual case).
'''
pass
@@ -132,7 +135,7 @@ def _check_zone_class(zone_class_str):
"""If the given argument is a string: checks if the given class is
a valid one, and returns an RRClass object if so.
Raises XfrinZoneInfoException if not.
- If it is None, this function returns the default RRClass.IN()"""
+ If it is None, this function returns the default RRClass.IN"""
if zone_class_str is None:
return DEFAULT_ZONE_CLASS
try:
@@ -313,12 +316,12 @@ class XfrinState:
class XfrinInitialSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+ rr.get_type().to_text() + ' received)')
conn._end_serial = get_soa_serial(rr.get_rdata()[0])
- if conn._request_type == RRType.IXFR() and \
+ if conn._request_type == RRType.IXFR and \
conn._end_serial <= conn._request_serial:
logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
conn._request_serial, conn._end_serial)
@@ -361,8 +364,8 @@ class XfrinFirstData(XfrinState):
http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
'''
- if conn._request_type == RRType.IXFR() and \
- rr.get_type() == RRType.SOA() and \
+ if conn._request_type == RRType.IXFR and \
+ rr.get_type() == RRType.SOA and \
conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
conn.zone_str())
@@ -379,7 +382,7 @@ class XfrinFirstData(XfrinState):
class XfrinIXFRDeleteSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
# this shouldn't happen; should this occur it means an internal
# bug.
raise XfrinException(rr.get_type().to_text() +
@@ -399,7 +402,7 @@ class XfrinIXFRDeleteSOA(XfrinState):
class XfrinIXFRDelete(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# This is the only place where current_serial is set
conn._current_serial = get_soa_serial(rr.get_rdata()[0])
self.set_xfrstate(conn, XfrinIXFRAddSOA())
@@ -410,7 +413,7 @@ class XfrinIXFRDelete(XfrinState):
class XfrinIXFRAddSOA(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() != RRType.SOA():
+ if rr.get_type() != RRType.SOA:
# this shouldn't happen; should this occur it means an internal
# bug.
raise XfrinException(rr.get_type().to_text() +
@@ -422,15 +425,15 @@ class XfrinIXFRAddSOA(XfrinState):
class XfrinIXFRAdd(XfrinState):
def handle_rr(self, conn, rr):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# This SOA marks the end of a difference sequence
conn.get_transfer_stats().ixfr_changeset_count += 1
soa_serial = get_soa_serial(rr.get_rdata()[0])
if soa_serial == conn._end_serial:
- # The final part is there. Check all was signed
- # and commit it to the database.
- conn._check_response_tsig_last()
- conn._diff.commit()
+ # The final part is there. Finish the transfer by
+ # checking the last TSIG (if required), the zone data and
+ # committing.
+ conn.finish_transfer()
self.set_xfrstate(conn, XfrinIXFREnd())
return True
elif soa_serial != conn._current_serial:
@@ -477,7 +480,7 @@ class XfrinAXFR(XfrinState):
Handle the RR by putting it into the zone.
"""
conn._diff.add_data(rr)
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
# SOA means end. Don't commit it yet - we need to perform
# post-transfer checks
@@ -500,15 +503,11 @@ class XfrinAXFREnd(XfrinState):
"""
Final processing after processing an entire AXFR session.
- In this process all the AXFR changes are committed to the
- data source.
-
- There might be more actions here, but for now we simply return False,
- indicating there will be no more message to receive.
-
+ This simply calls the finish_transfer method of the connection,
+ which checks the last TSIG (if required), validates the zone data,
+ and commits it.
"""
- conn._check_response_tsig_last()
- conn._diff.commit()
+ conn.finish_transfer()
return False
class XfrinTransferStats:
@@ -663,7 +662,7 @@ class XfrinConnection(asyncore.dispatcher):
result, finder = self._datasrc_client.find_zone(self._zone_name)
if result != DataSourceClient.SUCCESS:
return None
- result, soa_rrset, _ = finder.find(self._zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(self._zone_name, RRType.SOA)
if result != ZoneFinder.SUCCESS:
logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
return None
@@ -715,8 +714,8 @@ class XfrinConnection(asyncore.dispatcher):
query_id = random.randint(0, 0xFFFF)
self._query_id = query_id
msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.QUERY)
+ msg.set_rcode(Rcode.NOERROR)
msg.add_question(Question(self._zone_name, self._rrclass, query_type))
# Remember our serial, if known
@@ -724,7 +723,7 @@ class XfrinConnection(asyncore.dispatcher):
if self._zone_soa is not None else None
# Set the authority section with our SOA for IXFR
- if query_type == RRType.IXFR():
+ if query_type == RRType.IXFR:
if self._zone_soa is None:
# (incremental) IXFR doesn't work without known SOA
raise XfrinException('Failed to create IXFR query due to no ' +
@@ -805,6 +804,31 @@ class XfrinConnection(asyncore.dispatcher):
raise XfrinProtocolError('TSIG verify fail: no TSIG on last '+
'message')
+ def __validate_error(self, reason):
+ '''
+ Used as error callback below.
+ '''
+ logger.error(XFRIN_ZONE_INVALID, self._zone_name, self._rrclass,
+ reason)
+
+ def __validate_warning(self, reason):
+ '''
+ Used as warning callback below.
+ '''
+ logger.warn(XFRIN_ZONE_WARN, self._zone_name, self._rrclass, reason)
+
+ def finish_transfer(self):
+ """
+ Perform any necessary checks after a transfer. Then complete the
+ transfer by committing the transaction into the data source.
+ """
+ self._check_response_tsig_last()
+ if not check_zone(self._zone_name, self._rrclass,
+ self._diff.get_rrset_collection(),
+ (self.__validate_error, self.__validate_warning)):
+ raise XfrinZoneError('Validation of the new zone failed')
+ self._diff.commit()
+
def __parse_soa_response(self, msg, response_data):
'''Parse a response to SOA query and extract the SOA from answer.
@@ -831,7 +855,7 @@ class XfrinConnection(asyncore.dispatcher):
resp_question = msg.get_question()[0]
if resp_question.get_name() != self._zone_name or \
resp_question.get_class() != self._rrclass or \
- resp_question.get_type() != RRType.SOA():
+ resp_question.get_type() != RRType.SOA:
raise XfrinProtocolError('Invalid response to SOA query: '
'question mismatch: ' +
str(resp_question))
@@ -839,21 +863,21 @@ class XfrinConnection(asyncore.dispatcher):
# Look into the answer section for SOA
soa = None
for rr in msg.get_section(Message.SECTION_ANSWER):
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
if soa is not None:
raise XfrinProtocolError('SOA response had multiple SOAs')
soa = rr
# There should not be a CNAME record at top of zone.
- if rr.get_type() == RRType.CNAME():
+ if rr.get_type() == RRType.CNAME:
raise XfrinProtocolError('SOA query resulted in CNAME')
# If SOA is not found, try to figure out the reason then report it.
if soa is None:
# See if we have any SOA records in the authority section.
for rr in msg.get_section(Message.SECTION_AUTHORITY):
- if rr.get_type() == RRType.NS():
+ if rr.get_type() == RRType.NS:
raise XfrinProtocolError('SOA query resulted in referral')
- if rr.get_type() == RRType.SOA():
+ if rr.get_type() == RRType.SOA:
raise XfrinProtocolError('SOA query resulted in NODATA')
raise XfrinProtocolError('No SOA record found in response to ' +
'SOA query')
@@ -877,7 +901,7 @@ class XfrinConnection(asyncore.dispatcher):
'''
- self._send_query(RRType.SOA())
+ self._send_query(RRType.SOA)
data_len = self._get_request_response(2)
msg_len = socket.htons(struct.unpack('H', data_len)[0])
soa_response = self._get_request_response(msg_len)
@@ -901,7 +925,7 @@ class XfrinConnection(asyncore.dispatcher):
return XFRIN_OK
- def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+ def do_xfrin(self, check_soa, request_type=RRType.AXFR):
'''Do an xfr session by sending xfr request and parsing responses.'''
try:
@@ -909,7 +933,7 @@ class XfrinConnection(asyncore.dispatcher):
self._request_type = request_type
# Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
# to hardcode here.
- req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+ req_str = 'IXFR' if request_type == RRType.IXFR else 'AXFR'
if check_soa:
self._check_soa_serial()
self.close()
@@ -950,8 +974,16 @@ class XfrinConnection(asyncore.dispatcher):
# of trying another primary server, etc, but for now we treat it
# as "success".
pass
+ except XfrinZoneError:
+ # The log message doesn't contain the exception text, since there's
+ # only one place where the exception is thrown now and it'd be the
+ # same generic message every time.
+ logger.error(XFRIN_INVALID_ZONE_DATA, self.zone_str(),
+ format_addrinfo(self._master_addrinfo))
+ ret = XFRIN_FAIL
except XfrinProtocolError as e:
- logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+ # FIXME: Why is this .info? Even the messageID contains "ERROR".
+ logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_VIOLATION, req_str,
self.zone_str(),
format_addrinfo(self._master_addrinfo), str(e))
ret = XFRIN_FAIL
@@ -992,7 +1024,7 @@ class XfrinConnection(asyncore.dispatcher):
# cause interoperability trouble with stricter checks.
msg_rcode = msg.get_rcode()
- if msg_rcode != Rcode.NOERROR():
+ if msg_rcode != Rcode.NOERROR:
raise XfrinProtocolError('error response: %s' %
msg_rcode.to_text())
@@ -1088,13 +1120,13 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
ret = XFRIN_FAIL
if conn.connect_to_master():
ret = conn.do_xfrin(check_soa, request_type)
- if ret == XFRIN_FAIL and request_type == RRType.IXFR():
+ if ret == XFRIN_FAIL and request_type == RRType.IXFR:
# IXFR failed for some reason. It might mean the server can't
# handle it, or we don't have the zone or we are out of sync or
# whatever else. So we retry with AXFR, as it may succeed
# in many such cases.
retry = True
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
logger.warn(XFRIN_XFR_TRANSFER_FALLBACK, conn.zone_str())
conn.close()
conn = None
@@ -1140,7 +1172,7 @@ def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
xfrin_recorder.decrement(zone_name)
if exception is not None:
- typestr = "AXFR" if request_type == RRType.AXFR() else "IXFR"
+ typestr = "AXFR" if request_type == RRType.AXFR else "IXFR"
logger.error(XFRIN_XFR_PROCESS_FAILURE, typestr, zone_name.to_text(),
str(rrclass), str(exception))
@@ -1475,9 +1507,9 @@ class Xfrin:
logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
answer = create_answer(1, errmsg)
else:
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
if zone_info.use_ixfr:
- request_type = RRType.IXFR()
+ request_type = RRType.IXFR
master_addr = zone_info.get_master_addr_info()
if notify_addr[0] == master_addr[0] and\
notify_addr[2] == master_addr[2]:
@@ -1506,11 +1538,11 @@ class Xfrin:
rrclass)
zone_info = self._get_zone_info(zone_name, rrclass)
tsig_key = None
- request_type = RRType.AXFR()
+ request_type = RRType.AXFR
if zone_info:
tsig_key = zone_info.get_tsig_key()
if zone_info.use_ixfr:
- request_type = RRType.IXFR()
+ request_type = RRType.IXFR
db_file = args.get('db_file') or self._get_db_file()
ret = self.xfrin_start(zone_name,
rrclass,
@@ -1706,5 +1738,7 @@ def main(xfrin_class, use_signal=True):
if xfrind:
xfrind.shutdown()
+ logger.info(XFRIN_EXITING)
+
if __name__ == '__main__':
main(Xfrin)
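The new finish_transfer() path above runs the final TSIG check and a zone sanity check before anything is committed. A rough sketch of that flow, assuming the collaborators behave as shown in the hunks above; the names below (diff, check_last_tsig, check_zone) are simplified stand-ins, not the real isc.* objects:

class ZoneValidationError(Exception):
    """Raised when the freshly transferred zone fails validation."""

def finish_transfer(diff, check_last_tsig, check_zone, zone_name, zone_class):
    # The last message of the transfer must carry a valid TSIG (if TSIG is
    # in use at all); the check raises on failure.
    check_last_tsig()
    errors, warnings = [], []
    ok = check_zone(zone_name, zone_class,
                    diff.get_rrset_collection(),
                    (errors.append, warnings.append))
    for reason in warnings:
        print('warning:', reason)       # reported, but not fatal
    if not ok:
        # Errors were already reported through the callback; the new zone
        # is discarded and the old version (if any) stays in use.
        raise ZoneValidationError('validation of the new zone failed')
    diff.commit()                       # only a validated zone is committed
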
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 770a8b2..1d90b75 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -60,6 +60,9 @@ error is given in the log message.
There was an error opening a connection to the master. The error is
shown in the log message.
+% XFRIN_EXITING exiting
+The xfrin daemon is exiting.
+
% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
In an attempt of IXFR processing, the beginning SOA of the first difference
(following the initial SOA that specified the final SOA for all the
@@ -77,6 +80,11 @@ is not equal to the requested SOA serial.
There was an error importing the python DNS module pydnspp. The most
likely cause is a PYTHONPATH problem.
+% XFRIN_INVALID_ZONE_DATA zone %1 received from %2 is broken and unusable
+The zone was received, but it failed sanity validation. The previous version
+of the zone (if any is available) will be used. Look for previous
+XFRIN_ZONE_INVALID messages to see the exact problem(s).
+
% XFRIN_IXFR_TRANSFER_SUCCESS incremental IXFR transfer of zone %1 succeeded (messages: %2, changesets: %3, deletions: %4, additions: %5, bytes: %6, run time: %7 seconds, %8 bytes/second)
The IXFR transfer for the given zone was successful.
The provided information contains the following values:
@@ -117,10 +125,6 @@ There was a problem sending a message to the xfrout module or the
zone manager. This most likely means that the msgq daemon has quit or
was killed.
-% XFRIN_MSGQ_SEND_ERROR_AUTH error while contacting %1
-There was a problem sending a message to b10-auth. This most likely
-means that the msgq daemon has quit or was killed.
-
% XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1
There was a problem sending a message to the zone manager. This most
likely means that the msgq daemon has quit or was killed.
@@ -175,6 +179,10 @@ exception message is printed in the log message.
The XFR transfer for the given zone has failed due to a problem outside
of the xfrin module. Possible reasons are a broken DNS message or failure
in database connection. The error is shown in the log message.
+One common cause of this error is a locked database, especially with
+sqlite3, where a single transaction involving write operations blocks
+any other read or write transactions. This is not a critical error, and
+the transfer will be attempted again at the next retry time.
% XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4
An XFR session failed outside the main protocol handling. This
@@ -201,7 +209,7 @@ such that the remote server doesn't support IXFR, we don't have the SOA record
(or the zone at all), we are out of sync, etc. In many of these situations,
AXFR could still work. Therefore we try that one in case it helps.
-% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+% XFRIN_XFR_TRANSFER_PROTOCOL_VIOLATION %1 transfer of zone %2 with %3 failed: %4
The XFR transfer for the given zone has failed due to a protocol
error, such as an unexpected response from the primary server. The
error is shown in the log message. It may be because the primary
@@ -228,6 +236,12 @@ zones at a higher level. In future it is more likely that a separate
zone management framework is provided, and the situation where the
given zone isn't found in xfrout will be treated as an error.
+% XFRIN_ZONE_INVALID Newly received zone %1/%2 fails validation: %3
+The zone was received successfully, but it failed validation. The problem
+is severe enough that the new version of the zone is discarded and the old
+version, if any, will stay in use. A new transfer will be attempted after some time.
+The problem needs to be fixed in the zone data on the remote server.
+
% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
On starting an xfrin session, it is identified that the zone to be
transferred has multiple SOA RRs. Such a zone is broken, but could be
@@ -254,3 +268,9 @@ the latest version of the zone. But if the primary server is known to
be the real source of the zone, some unexpected inconsistency may have
happened, and you may want to take a closer look. In this case xfrin
doesn't perform subsequent zone transfer.
+
+% XFRIN_ZONE_WARN Newly received zone %1/%2 has a problem: %3
+The zone was received successfully, but a check of its contents found a
+possible issue. The zone may still be correct, but it should be reviewed and,
+if necessary, fixed on the remote server. The problem is described in the
+message; it does not stop the zone from being used.
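XFRIN_XFR_TRANSFER_FALLBACK above describes the retry-with-AXFR behaviour implemented in __process_xfrin(). A condensed sketch of that control flow, with a hypothetical try_transfer() callable standing in for the real connection setup and do_xfrin() call:

XFRIN_OK, XFRIN_FAIL = 0, 1

def transfer_with_fallback(try_transfer, request_type):
    """Attempt the transfer once; if IXFR fails, retry once with AXFR."""
    ret = try_transfer(request_type)
    if ret == XFRIN_FAIL and request_type == 'IXFR':
        # The master may not support IXFR, the zone may be missing, or we
        # may be out of sync; a full AXFR often still works, so fall back
        # exactly once before giving up.
        ret = try_transfer('AXFR')
    return ret
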
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 8b616d2..0790f98 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -55,8 +55,8 @@
outgoing DNS zone transfer service using AXFR or IXFR.
It is also used to send outgoing NOTIFY messages.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
When the <command>b10-auth</command> DNS server receives
a transfer request, <command>b10-xfrout</command> sends the
zone as found in the BIND 10 zone data store.
@@ -147,7 +147,7 @@
and exits <command>b10-xfrout</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the BIND 10 b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 774187f..bc0fae9 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -38,7 +38,7 @@ TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
#
TEST_ZONE_NAME_STR = "example.com."
TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
IXFR_OK_VERSION = 2011111802
IXFR_NG_VERSION = 2011111803
SOA_CURRENT_VERSION = 2011112001
@@ -109,16 +109,16 @@ class MockDataSrcClient:
zone names.
'''
- if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+ if name == Name('nosoa.example.com') and rrtype == RRType.SOA:
return (ZoneFinder.NXDOMAIN, None, 0)
- elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+ elif name == Name('multisoa.example.com') and rrtype == RRType.SOA:
soa_rrset = create_soa(SOA_CURRENT_VERSION)
soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
return (ZoneFinder.SUCCESS, soa_rrset, 0)
elif name == Name('maxserial.example.com'):
soa_rrset = create_soa(0xffffffff)
return (ZoneFinder.SUCCESS, soa_rrset, 0)
- elif rrtype == RRType.SOA():
+ elif rrtype == RRType.SOA:
return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION), 0)
raise ValueError('Unexpected input to mock finder: bug in test case?')
@@ -238,17 +238,17 @@ class TestXfroutSessionBase(unittest.TestCase):
msg = Message(Message.RENDER)
query_id = 0x1035
msg.set_qid(query_id)
- msg.set_opcode(Opcode.QUERY())
- msg.set_rcode(Rcode.NOERROR())
- req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
+ msg.set_opcode(Opcode.QUERY)
+ msg.set_rcode(Rcode.NOERROR)
+ req_type = RRType.AXFR if ixfr is None else RRType.IXFR
if with_question:
- msg.add_question(Question(zone_name, RRClass.IN(),
+ msg.add_question(Question(zone_name, RRClass.IN,
req_type if qtype is None else qtype))
- if req_type == RRType.IXFR():
- soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+ if req_type == RRType.IXFR:
+ soa = RRset(zone_name, soa_class, RRType.SOA, RRTTL(0))
# In the RDATA only the serial matters.
for i in range(0, num_soa):
- soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+ soa.add_rdata(Rdata(RRType.SOA, soa_class,
'm. r. ' + str(ixfr) + ' 1 1 1 1'))
msg.add_rrset(Message.SECTION_AUTHORITY, soa)
@@ -263,7 +263,7 @@ class TestXfroutSessionBase(unittest.TestCase):
def set_request_type(self, type):
self.xfrsess._request_type = type
- if type == RRType.AXFR():
+ if type == RRType.AXFR:
self.xfrsess._request_typestr = 'AXFR'
else:
self.xfrsess._request_typestr = 'IXFR'
@@ -280,7 +280,7 @@ class TestXfroutSessionBase(unittest.TestCase):
[{"action": "ACCEPT"}]),
{},
**self._counters)
- self.set_request_type(RRType.AXFR()) # test AXFR by default
+ self.set_request_type(RRType.AXFR) # test AXFR by default
self.mdata = self.create_request_data()
self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
# some test replaces a module-wide function. We should ensure the
@@ -342,7 +342,7 @@ class TestXfroutSession(TestXfroutSessionBase):
self.xfrsess._request_data = self.mdata
self.xfrsess._server.increase_transfers_counter = lambda : False
XfroutSession._handle(self.xfrsess)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED)
def test_quota_ok(self):
'''The default case in terms of the xfrout quota.
@@ -355,7 +355,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# Replace the data source client to avoid datasrc related exceptions
self.xfrsess.ClientClass = MockDataSrcClient
XfroutSession._handle(self.xfrsess)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR)
def test_exception_from_session(self):
'''Test the case where the main processing raises an exception.
@@ -372,14 +372,14 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_parse_query_message(self):
# Valid AXFR
[get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
- self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
+ self.assertEqual(RRType.AXFR, self.xfrsess._request_type)
self.assertEqual(get_rcode.to_text(), "NOERROR")
# Valid IXFR
request_data = self.create_request_data(ixfr=2011111801)
rcode, msg = self.xfrsess._parse_query_message(request_data)
- self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
- self.assertEqual(Rcode.NOERROR(), rcode)
+ self.assertEqual(RRType.IXFR, self.xfrsess._request_type)
+ self.assertEqual(Rcode.NOERROR, rcode)
# Broken request: no question
self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
@@ -387,7 +387,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# Broken request: invalid RR type (neither AXFR nor IXFR)
self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
- self.create_request_data(qtype=RRType.A()))
+ self.create_request_data(qtype=RRType.A))
# NOERROR
request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
@@ -554,7 +554,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# should be used.
self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
{"from": "127.0.0.1", "action": "ACCEPT"}])
- acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+ acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN)
self.assertEqual(acl, self.xfrsess._acl)
# install a per zone config with transfer ACL for example.com. Then
@@ -567,15 +567,15 @@ class TestXfroutSession(TestXfroutSessionBase):
com_acl
self.assertEqual(com_acl,
self.xfrsess._get_transfer_acl(Name('example.com'),
- RRClass.IN()))
+ RRClass.IN))
self.assertEqual(self.xfrsess._acl,
self.xfrsess._get_transfer_acl(Name('example.org'),
- RRClass.IN()))
+ RRClass.IN))
# Name matching should be case insensitive.
self.assertEqual(com_acl,
self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
- RRClass.IN()))
+ RRClass.IN))
def test_send_data(self):
self.xfrsess._send_data(self.sock, self.mdata)
@@ -600,9 +600,9 @@ class TestXfroutSession(TestXfroutSessionBase):
msg = self.getmsg()
msg.make_response()
# SOA record data with different cases
- soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+ soa_rrset = RRset(Name('Example.com.'), RRClass.IN, RRType.SOA,
RRTTL(3600))
- soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ soa_rrset.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'master.Example.com. admin.exAmple.com. ' +
'2011112001 3600 1800 2419200 7200'))
msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
@@ -680,8 +680,8 @@ class TestXfroutSession(TestXfroutSessionBase):
self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
def test_trigger_send_message_with_last_soa(self):
- rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
- rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
+ rrset_a = RRset(Name("example.com"), RRClass.IN, RRType.A, RRTTL(3600))
+ rrset_a.add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
msg = self.getmsg()
msg.make_response()
@@ -759,36 +759,36 @@ class TestXfroutSession(TestXfroutSessionBase):
self.xfrsess.ClientClass = MockDataSrcClient
# Successful case. A zone iterator should be set up.
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
# Failure cases
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
- Rcode.NOTAUTH())
+ Rcode.NOTAUTH)
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
- Rcode.SERVFAIL())
+ Rcode.SERVFAIL)
self.assertEqual(self.xfrsess._xfrout_setup(
self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
- Rcode.SERVFAIL())
+ Rcode.SERVFAIL)
def test_xfrout_ixfr_setup(self):
self.xfrsess.ClientClass = MockDataSrcClient
- self.set_request_type(RRType.IXFR())
+ self.set_request_type(RRType.IXFR)
# Successful case of pure IXFR. A zone journal reader should be set
# up.
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._jnl_reader)
# Successful case, but as a result of falling back to AXFR-style
# IXFR. A zone iterator should be set up instead of a journal reader.
self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -797,7 +797,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# indicating that the response will contain just one SOA.
self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION+1)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -805,7 +805,7 @@ class TestXfroutSession(TestXfroutSessionBase):
# the local SOA.
self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -814,7 +814,7 @@ class TestXfroutSession(TestXfroutSessionBase):
zone_name = Name('maxserial.example.com') # whose SOA is 0xffffffff
self.mdata = self.create_request_data(ixfr=1, zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR)
self.assertEqual(None, self.xfrsess._iterator)
self.assertEqual(None, self.xfrsess._jnl_reader)
@@ -823,7 +823,7 @@ class TestXfroutSession(TestXfroutSessionBase):
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR)
self.assertNotEqual(None, self.xfrsess._iterator)
# Failure cases
@@ -831,42 +831,42 @@ class TestXfroutSession(TestXfroutSessionBase):
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH)
# this is a strange case: zone's SOA will be found but the journal
# reader won't be created due to 'no such zone'.
zone_name = Name('notauth2.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH)
zone_name = Name('nosoa.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL)
zone_name = Name('multisoa.example.com')
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
zone_name=zone_name)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL)
# query name doesn't match the SOA's owner
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
# query's RR class doesn't match the SOA's class
zone_name = TEST_ZONE_NAME # make sure the name matches this time
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
- soa_class=RRClass.CH())
+ soa_class=RRClass.CH)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
# multiple SOA RRs
self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
num_soa=2)
self.assertEqual(self.xfrsess._xfrout_setup(
- self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+ self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR)
def test_dns_xfrout_start_formerror(self):
# formerror
@@ -876,7 +876,7 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_dns_xfrout_start_notauth(self):
def notauth(msg, name, rrclass):
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
self.xfrsess._xfrout_setup = notauth
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
get_msg = self.sock.read_msg()
@@ -887,11 +887,11 @@ class TestXfroutSession(TestXfroutSessionBase):
raise isc.datasrc.Error('exception for the sake of test')
self.xfrsess.ClientClass = internal_raise
self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
- self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+ self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL)
def test_dns_xfrout_start_noerror(self):
def noerror(msg, name, rrclass):
- return Rcode.NOERROR()
+ return Rcode.NOERROR
self.xfrsess._xfrout_setup = noerror
def myreply(msg, sock):
@@ -905,7 +905,7 @@ class TestXfroutSession(TestXfroutSessionBase):
def test_dns_xfrout_start_with_notcallable_xfrreqdone(self):
def noerror(msg, name, rrclass):
- return Rcode.NOERROR()
+ return Rcode.NOERROR
self.xfrsess._xfrout_setup = noerror
def myreply(msg, sock):
@@ -925,9 +925,9 @@ class TestXfroutSession(TestXfroutSessionBase):
self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
def test_reply_xfrout_query_axfr_with_tsig(self):
- rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+ rrset = RRset(Name('a.example.com'), RRClass.IN, RRType.A,
RRTTL(3600))
- rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
+ rrset.add_rdata(Rdata(RRType.A, RRClass.IN, '192.0.2.1'))
global xfrout
def get_rrset_len(rrset):
@@ -1015,8 +1015,8 @@ class TestXfroutSession(TestXfroutSessionBase):
algorithm = hmac-md5)
'''
- soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+ soa = RRset(Name('.'), RRClass.IN, RRType.SOA, RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN, '. . 0 0 0 0 0'))
self.mdata = self.create_request_data(zone_name=Name('.'))
self.xfrsess._soa = soa
if tsig:
@@ -1144,7 +1144,7 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.xfrsess._request_data = self.mdata
self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
'test.sqlite3'
- self.ns_name = 'a.dns.example.com'
+ self.ns_name = 'a.dns.example.com.'
def check_axfr_stream(self, response):
'''Common checks for AXFR(-style) response for the test zone.
@@ -1177,10 +1177,10 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.assertEqual(self.get_counter('ixfr_ended'), 0)
XfroutSession._handle(self.xfrsess)
response = self.sock.read_msg(Message.PRESERVE_ORDER);
- self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.assertEqual(Rcode.NOERROR, response.get_rcode())
self.check_axfr_stream(response)
- self.assertEqual(self.xfrsess._request_type, RRType.AXFR())
- self.assertNotEqual(self.xfrsess._request_type, RRType.IXFR())
+ self.assertEqual(self.xfrsess._request_type, RRType.AXFR)
+ self.assertNotEqual(self.xfrsess._request_type, RRType.IXFR)
self.assertEqual(self.get_counter('axfr_started'), 1)
self.assertEqual(self.get_counter('axfr_ended'), 1)
self.assertEqual(self.get_counter('ixfr_started'), 0)
@@ -1191,10 +1191,10 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.create_request_data(ixfr=IXFR_NG_VERSION)
XfroutSession._handle(self.xfrsess)
response = self.sock.read_msg(Message.PRESERVE_ORDER);
- self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+ self.assertEqual(Rcode.NOERROR, response.get_rcode())
# This is an AXFR-style IXFR. So the question section should indicate
# that it's an IXFR response.
- self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+ self.assertEqual(RRType.IXFR, response.get_question()[0].get_type())
self.check_axfr_stream(response)
def test_ixfr_normal_session(self):
@@ -1222,8 +1222,8 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
self.assertEqual(len(expected_records), len(actual_records))
for (expected_rr, actual_rr) in zip(expected_records, actual_records):
self.assertTrue(rrsets_equal(expected_rr, actual_rr))
- self.assertNotEqual(self.xfrsess._request_type, RRType.AXFR())
- self.assertEqual(self.xfrsess._request_type, RRType.IXFR())
+ self.assertNotEqual(self.xfrsess._request_type, RRType.AXFR)
+ self.assertEqual(self.xfrsess._request_type, RRType.IXFR)
self.assertEqual(self.get_counter('axfr_started'), 0)
self.assertEqual(self.get_counter('axfr_ended'), 0)
self.assertEqual(self.get_counter('ixfr_started'), 1)
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index f869955..5d25276 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -227,9 +227,9 @@ class XfroutSession():
self._tsig_key_ring)
tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
if tsig_error != TSIGError.NOERROR:
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def _parse_query_message(self, mdata):
''' parse query message to [socket,message]'''
@@ -239,11 +239,11 @@ class XfroutSession():
Message.from_wire(msg, mdata)
except Exception as err: # Exception is too broad
logger.error(XFROUT_PARSE_QUERY_ERROR, err)
- return Rcode.FORMERR(), None
+ return Rcode.FORMERR, None
# TSIG related checks
rcode = self._check_request_tsig(msg, mdata)
- if rcode != Rcode.NOERROR():
+ if rcode != Rcode.NOERROR:
return rcode, msg
# Make sure the question is valid. This should be ensured by
@@ -257,9 +257,9 @@ class XfroutSession():
# Identify the request type
self._request_type = question.get_type()
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._request_typestr = 'AXFR'
- elif self._request_type == RRType.IXFR():
+ elif self._request_type == RRType.IXFR:
self._request_typestr = 'IXFR'
else:
# Likewise, this should be impossible.
@@ -283,7 +283,7 @@ class XfroutSession():
logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
self._request_type, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class))
- return Rcode.REFUSED(), msg
+ return Rcode.REFUSED, msg
return rcode, msg
@@ -351,16 +351,16 @@ class XfroutSession():
'''
result, finder = self._datasrc_client.find_zone(zone_name)
if result != DataSourceClient.SUCCESS:
- return (Rcode.NOTAUTH(), None)
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ return (Rcode.NOTAUTH, None)
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result != ZoneFinder.SUCCESS:
- return (Rcode.SERVFAIL(), None)
+ return (Rcode.SERVFAIL, None)
# Especially for database-based zones, a working zone may be in
# a broken state where it has more than one SOA RR. We proactively
# check the condition and abort the xfr attempt if we identify it.
if soa_rrset.get_rdata_count() != 1:
- return (Rcode.SERVFAIL(), None)
- return (Rcode.NOERROR(), soa_rrset)
+ return (Rcode.SERVFAIL, None)
+ return (Rcode.NOERROR, soa_rrset)
def __axfr_setup(self, zone_name):
'''Setup a zone iterator for AXFR or AXFR-style IXFR.
@@ -379,16 +379,16 @@ class XfroutSession():
# update get_iterator() API so that we can distinguish "no such
# zone" and other cases (#1373). For now we consider all these
# cases as NOTAUTH.
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
# If we are an authoritative name server for the zone, but fail
# to find the zone's SOA record in datasource, xfrout can't
# provide zone transfer for it.
self._soa = self._iterator.get_soa()
if self._soa is None or self._soa.get_rdata_count() != 1:
- return Rcode.SERVFAIL()
+ return Rcode.SERVFAIL
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def __ixfr_setup(self, request_msg, zone_name, zone_class):
'''Setup a zone journal reader for IXFR.
@@ -405,21 +405,21 @@ class XfroutSession():
# Ignore data whose owner name is not the zone apex, and
# ignore non-SOA or different class of records.
if auth_rrset.get_name() != zone_name or \
- auth_rrset.get_type() != RRType.SOA() or \
+ auth_rrset.get_type() != RRType.SOA or \
auth_rrset.get_class() != zone_class:
continue
if auth_rrset.get_rdata_count() != 1:
logger.info(XFROUT_IXFR_MULTIPLE_SOA,
format_addrinfo(self._remote))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
remote_soa = auth_rrset
if remote_soa is None:
logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
# Retrieve the local SOA
rcode, self._soa = self._get_zone_soa(zone_name)
- if rcode != Rcode.NOERROR():
+ if rcode != Rcode.NOERROR:
return rcode
# RFC1995 says "If an IXFR query with the same or newer version
@@ -437,7 +437,7 @@ class XfroutSession():
logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class),
begin_serial, end_serial)
- return Rcode.NOERROR()
+ return Rcode.NOERROR
# Set up the journal reader or fall back to AXFR-style IXFR
try:
@@ -462,12 +462,12 @@ class XfroutSession():
# between these two operations. We treat it as NOTAUTH.
logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
format_zone_str(zone_name, zone_class))
- return Rcode.NOTAUTH()
+ return Rcode.NOTAUTH
# Use the reader as the iterator to generate the response.
self._iterator = self._jnl_reader
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def _xfrout_setup(self, request_msg, zone_name, zone_class):
'''Setup a context for xfr responses according to the request type.
@@ -490,7 +490,7 @@ class XfroutSession():
self._server.get_db_file() + '"}'
self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
return self.__axfr_setup(zone_name)
else:
return self.__ixfr_setup(request_msg, zone_name, zone_class)
@@ -500,17 +500,17 @@ class XfroutSession():
#TODO. create query message and parse header
if rcode_ is None: # Dropped by ACL
return
- elif rcode_ == Rcode.NOTAUTH() or rcode_ == Rcode.REFUSED():
+ elif rcode_ == Rcode.NOTAUTH or rcode_ == Rcode.REFUSED:
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
- elif rcode_ != Rcode.NOERROR():
+ elif rcode_ != Rcode.NOERROR:
return self._reply_query_with_error_rcode(msg, sock_fd,
- Rcode.FORMERR())
+ Rcode.FORMERR)
elif not quota_ok:
logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
format_addrinfo(self._remote),
self._server._max_transfers_out)
return self._reply_query_with_error_rcode(msg, sock_fd,
- Rcode.REFUSED())
+ Rcode.REFUSED)
question = msg.get_question()[0]
zone_name = question.get_name()
@@ -522,15 +522,15 @@ class XfroutSession():
except Exception as ex:
logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
format_addrinfo(self._remote), zone_str, ex)
- rcode_ = Rcode.SERVFAIL()
- if rcode_ != Rcode.NOERROR():
+ rcode_ = Rcode.SERVFAIL
+ if rcode_ != Rcode.NOERROR:
logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
format_addrinfo(self._remote), zone_str, rcode_)
return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
try:
# increment Xfr starts by RRType
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._inc_axfr_running()
else:
self._inc_ixfr_running()
@@ -542,7 +542,7 @@ class XfroutSession():
format_addrinfo(self._remote), zone_str, err)
finally:
# decrement Xfr starts by RRType
- if self._request_type == RRType.AXFR():
+ if self._request_type == RRType.AXFR:
self._dec_axfr_running()
else:
self._dec_ixfr_running()
@@ -614,7 +614,7 @@ class XfroutSession():
# For AXFR (or AXFR-style IXFR), in which case _jnl_reader is None,
# we should skip SOAs from the iterator.
- if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
+ if self._jnl_reader is None and rrset.get_type() == RRType.SOA:
continue
# We calculate the maximum size of the RRset (i.e. the
@@ -756,7 +756,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
"""
sock_fd = recv_fd(request.fileno())
if sock_fd < 0:
- logger.warn(XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR)
+ logger.warn(XFROUT_RECEIVE_FD_FAILED)
return False
# receive request msg. If it fails we simply terminate the thread;
@@ -1215,7 +1215,7 @@ class XfroutServer:
zone_name = args.get('zone_name')
zone_class = args.get('zone_class')
if not zone_class:
- zone_class = str(RRClass.IN())
+ zone_class = str(RRClass.IN)
if zone_name:
logger.info(XFROUT_NOTIFY_COMMAND, zone_name, zone_class)
if self.send_notify(zone_name, zone_class):
@@ -1274,7 +1274,7 @@ if '__main__' == __name__:
xfrout_server = XfroutServer()
xfrout_server.run()
except KeyboardInterrupt:
- logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
+ logger.info(XFROUT_STOPPED_BY_KEYBOARD)
except SessionError as e:
logger.error(XFROUT_CC_SESSION_ERROR, str(e))
except ModuleCCSessionError as e:
@@ -1287,3 +1287,4 @@ if '__main__' == __name__:
if xfrout_server:
xfrout_server.shutdown()
+ logger.info(XFROUT_EXITING)
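The __ixfr_setup() hunks above choose between three ways of answering an IXFR query: a single-SOA "up to date" reply, a true incremental answer from the journal, or an AXFR-style fallback driven by a zone iterator. A simplified decision sketch, assuming plain integer serials (the real code uses serial-number arithmetic) and a hypothetical open_journal_reader() helper that raises KeyError when no journal covers the requested range:

def ixfr_answer_style(remote_serial, local_serial, open_journal_reader):
    """Return a label for how this IXFR request would be answered."""
    if remote_serial >= local_serial:
        # RFC 1995: the client already has the same or a newer version,
        # so reply with a single SOA record only.
        return 'single-soa'
    try:
        reader = open_journal_reader(remote_serial, local_serial)
    except KeyError:
        # No journal data covering the requested range: fall back to an
        # AXFR-style IXFR answer generated from a full zone iterator.
        return 'axfr-style-ixfr'
    return 'incremental-ixfr' if reader is not None else 'axfr-style-ixfr'
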
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index d48aa24..5fb254e 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -32,6 +32,9 @@ configuration manager b10-cfgmgr is not running.
The xfrout process encountered an error when installing the configuration at
startup time. Details of the error are included in the log message.
+% XFROUT_EXITING exiting
+The xfrout daemon is exiting.
+
% XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon
There was a socket error while contacting the b10-auth daemon to
fetch a transfer request. The auth daemon may have shutdown.
@@ -152,7 +155,7 @@ statistics data should be sent to the stats daemon.
The xfrout daemon received a shutdown command from the command channel
and will now shut down.
-% XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
+% XFROUT_RECEIVE_FD_FAILED error receiving the file descriptor for an XFR connection
There was an error receiving the file descriptor for the transfer
request from b10-auth. There can be several reasons for this, but
the most likely cause is that b10-auth terminates for some reason
diff --git a/src/bin/zonemgr/b10-zonemgr.xml b/src/bin/zonemgr/b10-zonemgr.xml
index f859d23..91cdfc2 100644
--- a/src/bin/zonemgr/b10-zonemgr.xml
+++ b/src/bin/zonemgr/b10-zonemgr.xml
@@ -55,8 +55,8 @@
as the BIND 10 secondary manager, keeps track of timers
and other information necessary for BIND 10 to act as a DNS slave.
Normally it is started by the
- <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
- boss process.
+ <citerefentry><refentrytitle>b10-init</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ process.
</para>
<para>
@@ -74,7 +74,7 @@
<!--
- self._send_command(XFRIN_MODULE_NAME, ZONE_NOTIFY_COMMAND, param)
+ self._send_command(XFRIN_MODULE_NAME, ZONE_NOTIFY_COMMAND, param)
self._clear_zone_notifier_master(zone_name_class)
# Send refresh command to xfrin module
else:
@@ -188,7 +188,7 @@
<command>shutdown</command> exits <command>b10-zonemgr</command>.
This has an optional <varname>pid</varname> argument to
select the process ID to stop.
- (Note that the BIND 10 boss process may restart this service
+ (Note that the b10-init process may restart this service
if configured.)
</para>
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index 0412e3f..40bfa39 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -625,7 +625,7 @@ class Zonemgr:
ZONE_NOTIFY_COMMAND is issued by Auth process;
ZONE_NEW_DATA_READY_CMD and ZONE_XFRIN_FAILED are issued by
Xfrin process;
- shutdown is issued by a user or Boss process. """
+ shutdown is issued by a user or Init process. """
answer = create_answer(0)
if command == ZONE_NOTIFY_COMMAND:
""" Handle Auth notify command"""
@@ -714,4 +714,4 @@ if '__main__' == __name__:
if zonemgrd and zonemgrd.running:
zonemgrd.shutdown()
- logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
+ logger.info(ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
index 4f58271..f67b5b9 100644
--- a/src/bin/zonemgr/zonemgr_messages.mes
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -69,7 +69,7 @@ new data.
% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
This is a debug message indicating that the zone manager has received
-a SHUTDOWN command over the command channel from the Boss process.
+a SHUTDOWN command over the command channel from the Init process.
It will act on this command and shut down.
% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
@@ -114,7 +114,7 @@ connecting to the command channel daemon. The most usual cause of this
problem is that the daemon is not running.
% ZONEMGR_SHUTDOWN zone manager has shut down
-A debug message, output when the zone manager has shut down completely.
+The zone manager has shut down completely.
% ZONEMGR_STARTED zonemgr started
This informational message is output by zonemgr when all initialization
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 94b955a..b561784 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -60,6 +60,10 @@ and its length (2 bytes) is counted in the total length.
There should be data representing the length of message on the socket, but it
is not there.
+% CC_LNAME_RECEIVED received local name: %1
+Debug message: the local module received its unique identifier (name)
+from msgq on completion of establishing the session with msgq.
+
% CC_NO_MESSAGE no message ready to be received yet
The program polled for incoming messages, but there was no message waiting.
This is a debug message which may happen only after CC_GROUP_RECEIVE.
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index f9c23db..af3602a 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -261,7 +261,7 @@ skipChars(std::istream& in, const char* chars, int& line, int& pos) {
} else {
++pos;
}
- in.get();
+ in.ignore();
c = in.peek();
}
}
@@ -291,7 +291,7 @@ skipTo(std::istream& in, const std::string& file, int& line,
pos = 1;
++line;
}
- in.get();
+ in.ignore();
++pos;
}
in.putback(c);
@@ -352,7 +352,7 @@ strFromStringstream(std::istream& in, const std::string& file,
throwJSONError("Bad escape", file, line, pos);
}
// drop the escaped char
- in.get();
+ in.ignore();
++pos;
}
ss.put(c);
@@ -490,14 +490,14 @@ fromStringstreamMap(std::istream& in, const std::string& file, int& line,
throwJSONError(std::string("Unterminated map, <string> or } expected"), file, line, pos);
} else if (c == '}') {
// empty map, skip closing curly
- c = in.get();
+ in.ignore();
} else {
while (c != EOF && c != '}') {
std::string key = strFromStringstream(in, file, line, pos);
skipTo(in, file, line, pos, ":", WHITESPACE);
// skip the :
- in.get();
+ in.ignore();
pos++;
ConstElementPtr value = Element::fromJSON(in, file, line, pos);
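
The data.cc hunks above swap std::istream::get() for ignore() in the JSON parser's skip helpers; both consume exactly one character, but ignore() states the "discard" intent and leaves no unused return value behind. A minimal standalone sketch of that equivalence (not part of the patch):

    #include <cassert>
    #include <sstream>

    int main() {
        std::istringstream a("xy"), b("xy");
        a.get();     // extracts 'x' and returns it (the value is unused here)
        b.ignore();  // extracts 'x' and simply discards it
        assert(a.peek() == b.peek());  // both streams now look at 'y'
        return 0;
    }
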
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 4455b68..1d3fac2 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -333,6 +333,7 @@ Session::establish(const char* socket_file) {
recvmsg(routing, msg, false);
impl_->lname_ = msg->get("lname")->stringValue();
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, CC_LNAME_RECEIVED).arg(impl_->lname_);
// At this point there's no risk of resource leak.
session_holder.clear();
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index e07c5a3..2a5e758 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -343,6 +343,7 @@ TEST_F(CCSessionTest, checkCommand) {
session.addMessage(el("{ \"command\": \"bad_command\" }"), "Spec29", "*");
result = mccs.checkCommand();
EXPECT_EQ(0, session.getMsgQueue()->size());
+ EXPECT_EQ(0, result);
session.addMessage(el("{ \"command\": [ \"bad_command\" ] }"),
"Spec29", "*");
@@ -627,6 +628,7 @@ TEST_F(CCSessionTest, ignoreRemoteConfigCommands) {
EXPECT_EQ(1, session.getMsgQueue()->size());
result = mccs.checkCommand();
EXPECT_EQ(0, session.getMsgQueue()->size());
+ EXPECT_EQ(0, result);
}
TEST_F(CCSessionTest, initializationFail) {
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index dc1007a..1a5776f 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -28,10 +28,12 @@ libb10_datasrc_la_SOURCES += rbnode_rrset.h
libb10_datasrc_la_SOURCES += rbtree.h
libb10_datasrc_la_SOURCES += exceptions.h
libb10_datasrc_la_SOURCES += zonetable.h zonetable.cc
-libb10_datasrc_la_SOURCES += zone.h zone_finder.cc zone_finder_context.cc
+libb10_datasrc_la_SOURCES += zone.h zone_finder.h zone_finder.cc
+libb10_datasrc_la_SOURCES += zone_finder_context.cc
+libb10_datasrc_la_SOURCES += zone_iterator.h
libb10_datasrc_la_SOURCES += result.h
libb10_datasrc_la_SOURCES += logger.h logger.cc
-libb10_datasrc_la_SOURCES += client.h client.cc iterator.h
+libb10_datasrc_la_SOURCES += client.h client.cc
libb10_datasrc_la_SOURCES += database.h database.cc
libb10_datasrc_la_SOURCES += factory.h factory.cc
libb10_datasrc_la_SOURCES += client_list.h client_list.cc
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 607af05..9c5d262 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -21,6 +21,7 @@
#include <boost/shared_ptr.hpp>
#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
/// \file
/// Datasource clients
@@ -66,7 +67,7 @@
namespace isc {
namespace datasrc {
-// The iterator.h is not included on purpose, most application won't need it
+// zone_iterator.h is not included on purpose, most applications won't need it
class ZoneIterator;
typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 0b010a4..f6d8252 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -18,7 +18,7 @@
#include <datasrc/database.h>
#include <datasrc/data_source.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/rrset_collection_base.h>
#include <exceptions/exceptions.h>
@@ -1385,7 +1385,7 @@ DatabaseClient::getIterator(const isc::dns::Name& name,
return (iterator);
}
-/// \brief datasrc implementation of RRsetCollectionBase.
+/// \brief Database implementation of RRsetCollectionBase.
class RRsetCollection : public isc::datasrc::RRsetCollectionBase {
public:
/// \brief Constructor.
@@ -1393,26 +1393,11 @@ public:
isc::datasrc::RRsetCollectionBase(updater, rrclass)
{}
- /// \brief Destructor
- virtual ~RRsetCollection() {}
-
/// \brief A wrapper around \c disable() so that it can be used as a
/// public method. \c disable() is protected.
void disableWrapper() {
disable();
}
-
-protected:
- // TODO: RRsetCollectionBase::Iter is not implemented and the
- // following two methods just throw.
-
- virtual RRsetCollectionBase::IterPtr getBeginning() {
- isc_throw(NotImplemented, "This method is not implemented.");
- }
-
- virtual RRsetCollectionBase::IterPtr getEnd() {
- isc_throw(NotImplemented, "This method is not implemented.");
- }
};
//
@@ -1454,7 +1439,7 @@ public:
virtual ZoneFinder& getFinder() { return (*finder_); }
- virtual isc::datasrc::RRsetCollectionBase& getRRsetCollection() {
+ virtual isc::dns::RRsetCollectionBase& getRRsetCollection() {
if (!rrset_collection_) {
// This is only assigned the first time and remains for the
// lifetime of the DatabaseUpdater.
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 6ac9db0..5311203 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -197,6 +197,16 @@ modify the database). This is what the client would do when such RRs
were given in a DNS response according to RFC2181. The data in
database should be checked and fixed.
+% DATASRC_DATABASE_JOURNALREADER_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset. The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message. The administrator should examine the diff in the database
+to find any invalid data and fix it.
+
% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
This is a debug message indicating that the program (successfully)
reaches the end of sequences of a zone's differences. The zone's name
@@ -215,16 +225,6 @@ a zone's difference sequences from a database-based data source. The
zone's name and class, database name, and the start and end serials
are shown in the message.
-% DATASRC_DATABASE_JOURNALREADER_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
-This is an error message indicating that a zone's diff is broken and
-the data source library failed to convert it to a valid RRset. The
-most likely cause of this is that someone has manually modified the
-zone's diff in the database and inserted invalid data as a result.
-The zone's name and class, database name, and the start and end
-serials, and an additional detail of the error are shown in the
-message. The administrator should examine the diff in the database
-to find any invalid data and fix it.
-
% DATASRC_DATABASE_NO_MATCH not match for %2/%3/%4 in %1
No match (not even a wildcard) was found in the named data source for the given
name/type/class in the data source.
@@ -442,6 +442,10 @@ shown name, the search tries the superdomain name that share the shown
www.example.com. with shown label count of 3, example.com. is being
tried).
+% DATASRC_MEM_FIND_TYPE_AT_ORIGIN origin query for type %1 in in-memory zone %2/%3 successful
+Debug information. A specific type RRset is requested at a zone origin
+of an in-memory zone and it is found.
+
% DATASRC_MEM_FIND_ZONE looking for zone '%1'
Debug information. A zone object for this zone is being searched for in the
in-memory data source.
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
deleted file mode 100644
index e1c6929..0000000
--- a/src/lib/datasrc/iterator.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef DATASRC_ZONE_ITERATOR_H
-#define DATASRC_ZONE_ITERATOR_H 1
-
-#include <dns/rrset.h>
-
-#include <boost/noncopyable.hpp>
-
-#include <datasrc/zone.h>
-
-namespace isc {
-namespace datasrc {
-
-/**
- * \brief Read-only iterator to a zone.
- *
- * You can get an instance of (descendand of) ZoneIterator from
- * DataSourceClient::getIterator() method. The actual concrete implementation
- * will be different depending on the actual data source used. This is the
- * abstract interface.
- *
- * There's no way to start iterating from the beginning again or return.
- */
-class ZoneIterator : public boost::noncopyable {
-public:
- /**
- * \brief Destructor
- *
- * Virtual destructor. It is empty, but ensures the right destructor from
- * descendant is called.
- */
- virtual ~ ZoneIterator() { }
-
- /**
- * \brief Get next RRset from the zone.
- *
- * This returns the next RRset in the zone as a shared pointer. The
- * shared pointer is used to allow both accessing in-memory data and
- * automatic memory management.
- *
- * Any special order is not guaranteed.
- *
- * While this can potentially throw anything (including standard allocation
- * errors), it should be rare.
- *
- * \return Pointer to the next RRset or NULL pointer when the iteration
- * gets to the end of the zone.
- */
- virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
-
- /**
- * \brief Return the SOA record of the zone in the iterator context.
- *
- * This method returns the zone's SOA record (if any, and a valid zone
- * should have it) in the form of an RRset object. This SOA is identical
- * to that (again, if any) contained in the sequence of RRsets returned
- * by the iterator. In that sense this method is redundant, but is
- * provided as a convenient utility for the application of the
- * iterator; the application may need to know the SOA serial or the
- * SOA RR itself for the purpose of protocol handling or skipping the
- * expensive iteration processing.
- *
- * If the zone doesn't have an SOA (which is broken, but some data source
- * may allow that situation), this method returns NULL. Also, in the
- * normal and valid case, the SOA should have exactly one RDATA, but
- * this API does not guarantee it as some data source may accept such an
- * abnormal condition. It's up to the caller whether to check the number
- * of RDATA and how to react to the unexpected case.
- *
- * Each concrete derived method must ensure that the SOA returned by this
- * method is identical to the zone's SOA returned via the iteration.
- * For example, even if another thread or process updates the SOA while
- * the iterator is working, the result of this method must not be
- * affected by the update. For database based data sources, this can
- * be done by making the entire iterator operation as a single database
- * transaction, but the actual implementation can differ.
- *
- * \exception None
- *
- * \return A shared pointer to an SOA RRset that would be returned
- * from the iteration. It will be NULL if the zone doesn't have an SOA.
- */
- virtual isc::dns::ConstRRsetPtr getSOA() const = 0;
-};
-
-}
-}
-#endif // DATASRC_ZONE_ITERATOR_H
-
-// Local Variables:
-// mode: c++
-// End:
diff --git a/src/lib/datasrc/memory/memory_client.h b/src/lib/datasrc/memory/memory_client.h
index 169421f..10e8a81 100644
--- a/src/lib/datasrc/memory/memory_client.h
+++ b/src/lib/datasrc/memory/memory_client.h
@@ -17,7 +17,7 @@
#include <util/memory_segment.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/client.h>
#include <datasrc/memory/zone_table.h>
#include <datasrc/memory/zone_data.h>
diff --git a/src/lib/datasrc/memory/rdata_serialization.cc b/src/lib/datasrc/memory/rdata_serialization.cc
index 6ac18d0..7cef6d6 100644
--- a/src/lib/datasrc/memory/rdata_serialization.cc
+++ b/src/lib/datasrc/memory/rdata_serialization.cc
@@ -25,13 +25,20 @@
#include <dns/rrclass.h>
#include <dns/rrtype.h>
+#include <boost/static_assert.hpp>
+#include <boost/function.hpp>
+#include <boost/bind.hpp>
+#include <boost/optional.hpp>
+
#include <cassert>
#include <cstring>
+#include <set>
#include <vector>
-#include <boost/static_assert.hpp>
using namespace isc::dns;
+using namespace isc::dns::rdata;
using std::vector;
+using std::set;
namespace isc {
namespace datasrc {
@@ -222,7 +229,6 @@ getRdataEncodeSpec(const RRClass& rrclass, const RRType& rrtype) {
}
namespace {
-
// This class is a helper for RdataEncoder to divide the content of RDATA
// fields for encoding by "abusing" the message rendering logic.
// The idea is to identify domain name fields in the writeName() method,
@@ -368,16 +374,78 @@ private:
} // end of unnamed namespace
+namespace {
+// A trivial comparison function used for std::set<ConstRdataPtr> below.
+bool
+RdataLess(const ConstRdataPtr& rdata1, const ConstRdataPtr& rdata2) {
+ return (rdata1->compare(*rdata2) < 0);
+}
+}
+
struct RdataEncoder::RdataEncoderImpl {
RdataEncoderImpl() : encode_spec_(NULL), rrsig_buffer_(0),
- rdata_count_(0)
+ old_varlen_count_(0), old_sig_count_(0),
+ old_data_len_(0), old_sig_len_(0),
+ old_length_fields_(NULL), old_data_(NULL),
+ old_sig_data_(NULL), olddata_buffer_(0),
+ rdatas_(boost::bind(RdataLess, _1, _2)),
+ rrsigs_(boost::bind(RdataLess, _1, _2))
{}
+ // Common initialization for RdataEncoder::start().
+ void start(RRClass rrclass, RRType rrtype) {
+ if (rrtype == RRType::RRSIG()) {
+ isc_throw(BadValue, "RRSIG cannot be encoded as main RDATA type");
+ }
+
+ encode_spec_ = &getRdataEncodeSpec(rrclass, rrtype);
+ current_class_ = rrclass;
+ current_type_ = rrtype;
+ field_composer_.clearLocal(encode_spec_);
+ rrsig_buffer_.clear();
+ rrsig_lengths_.clear();
+ old_varlen_count_ = 0;
+ old_sig_count_ = 0;
+ old_data_len_ = 0;
+ old_sig_len_ = 0;
+ old_length_fields_ = NULL;
+ old_data_ = NULL;
+ old_sig_data_ = NULL;
+ olddata_buffer_.clear();
+
+ rdatas_.clear();
+ rrsigs_.clear();
+ }
+
const RdataEncodeSpec* encode_spec_; // encode spec of current RDATA set
RdataFieldComposer field_composer_;
util::OutputBuffer rrsig_buffer_;
- size_t rdata_count_;
vector<uint16_t> rrsig_lengths_;
+
+ // Placeholder for the RR class and type of the current session;
+ // initially null, and will be (re)set at the beginning of each session.
+ boost::optional<RRClass> current_class_;
+ boost::optional<RRType> current_type_;
+
+ // Parameters corresponding to the previously encoded data in the
+ // merge mode.
+ size_t old_varlen_count_;
+ size_t old_sig_count_;
+ size_t old_data_len_;
+ size_t old_sig_len_;
+ const void* old_length_fields_;
+ const void* old_data_;
+ const void* old_sig_data_;
+ util::OutputBuffer olddata_buffer_;
+
+ // Temporary storage of Rdata and RRSIGs to be encoded. They are used
+ // to detect and ignore duplicate data.
+ typedef boost::function<bool(const ConstRdataPtr&, const ConstRdataPtr&)>
+ RdataCmp;
+ // added unique Rdatas
+ set<ConstRdataPtr, RdataCmp> rdatas_;
+ // added unique RRSIG Rdatas
+ set<ConstRdataPtr, RdataCmp> rrsigs_;
};
RdataEncoder::RdataEncoder() :
@@ -390,36 +458,119 @@ RdataEncoder::~RdataEncoder() {
void
RdataEncoder::start(RRClass rrclass, RRType rrtype) {
- if (rrtype == RRType::RRSIG()) {
- isc_throw(BadValue, "RRSIG cannot be encoded as main RDATA type");
- }
+ impl_->start(rrclass, rrtype);
+}
- impl_->encode_spec_ = &getRdataEncodeSpec(rrclass, rrtype);
- impl_->field_composer_.clearLocal(impl_->encode_spec_);
- impl_->rrsig_buffer_.clear();
- impl_->rdata_count_ = 0;
- impl_->rrsig_lengths_.clear();
+namespace {
+// Helper callbacks used in the merge mode of start(). These re-construct
+// each RDATA and RRSIG in the wire-format, counting the total length of the
+// encoded data fields.
+void
+decodeName(const LabelSequence& name_labels, RdataNameAttributes,
+ util::OutputBuffer* buffer, size_t* total_len)
+{
+ size_t name_dlen;
+ const uint8_t* name_data = name_labels.getData(&name_dlen);
+ buffer->writeData(name_data, name_dlen);
+ *total_len += name_labels.getSerializedLength();
}
void
-RdataEncoder::addRdata(const rdata::Rdata& rdata) {
+decodeData(const void* data, size_t data_len, util::OutputBuffer* buffer,
+ size_t* total_len)
+{
+ buffer->writeData(data, data_len);
+ *total_len += data_len;
+}
+}
+
+void
+RdataEncoder::start(RRClass rrclass, RRType rrtype, const void* old_data,
+ size_t old_rdata_count, size_t old_sig_count)
+{
+ impl_->start(rrclass, rrtype);
+
+ // Identify start points of various fields of the encoded data and
+ // remember it in class variables.
+ const uint8_t* cp = static_cast<const uint8_t*>(old_data);
+ impl_->old_varlen_count_ =
+ impl_->encode_spec_->varlen_count * old_rdata_count;
+ if (impl_->old_varlen_count_ > 0 || old_sig_count > 0) {
+ impl_->old_length_fields_ = cp;
+ cp += (impl_->old_varlen_count_ + old_sig_count) * sizeof(uint16_t);
+ }
+ impl_->old_data_ = cp;
+ impl_->old_sig_count_ = old_sig_count;
+
+ // Re-construct RDATAs and RRSIGs in the form of Rdata objects, and
+ // keep them in rdatas_ and rrsigs_ so we can detect and ignore duplicate
+ // data with the existing one later. We'll also figure out the lengths
+ // of the RDATA and RRSIG part of the data by iterating over the data
+ // fields.
+ size_t total_len = 0;
+ RdataReader reader(rrclass, rrtype, old_data, old_rdata_count,
+ old_sig_count,
+ boost::bind(decodeName, _1, _2, &impl_->olddata_buffer_,
+ &total_len),
+ boost::bind(decodeData, _1, _2, &impl_->olddata_buffer_,
+ &total_len));
+ while (reader.iterateRdata()) {
+ util::InputBuffer ibuffer(impl_->olddata_buffer_.getData(),
+ impl_->olddata_buffer_.getLength());
+ impl_->rdatas_.insert(createRdata(rrtype, rrclass, ibuffer,
+ impl_->olddata_buffer_.getLength()));
+ impl_->olddata_buffer_.clear();
+ }
+ impl_->old_data_len_ = total_len;
+
+ total_len = 0;
+ while (reader.iterateSingleSig()) {
+ util::InputBuffer ibuffer(impl_->olddata_buffer_.getData(),
+ impl_->olddata_buffer_.getLength());
+ impl_->rrsigs_.insert(createRdata(RRType::RRSIG(), rrclass, ibuffer,
+ impl_->olddata_buffer_.getLength()));
+ impl_->olddata_buffer_.clear();
+ }
+ impl_->old_sig_len_ = total_len;
+}
+
+bool
+RdataEncoder::addRdata(const Rdata& rdata) {
if (impl_->encode_spec_ == NULL) {
isc_throw(InvalidOperation,
"RdataEncoder::addRdata performed before start");
}
+ // Simply ignore duplicate RDATA. Creating RdataPtr also checks the
+ // given Rdata is of the correct RR type.
+ ConstRdataPtr rdatap = createRdata(*impl_->current_type_,
+ *impl_->current_class_, rdata);
+ if (impl_->rdatas_.find(rdatap) != impl_->rdatas_.end()) {
+ return (false);
+ }
+
impl_->field_composer_.startRdata();
rdata.toWire(impl_->field_composer_);
impl_->field_composer_.endRdata();
- ++impl_->rdata_count_;
+ impl_->rdatas_.insert(rdatap);
+
+ return (true);
}
-void
-RdataEncoder::addSIGRdata(const rdata::Rdata& sig_rdata) {
+bool
+RdataEncoder::addSIGRdata(const Rdata& sig_rdata) {
if (impl_->encode_spec_ == NULL) {
isc_throw(InvalidOperation,
"RdataEncoder::addSIGRdata performed before start");
}
+
+ // Ignore duplicate RRSIGs
+ ConstRdataPtr rdatap = createRdata(RRType::RRSIG(), *impl_->current_class_,
+ sig_rdata);
+ if (impl_->rrsigs_.find(rdatap) != impl_->rrsigs_.end()) {
+ return (false);
+ }
+
const size_t cur_pos = impl_->rrsig_buffer_.getLength();
sig_rdata.toWire(impl_->rrsig_buffer_);
const size_t rrsig_datalen = impl_->rrsig_buffer_.getLength() - cur_pos;
@@ -427,7 +578,10 @@ RdataEncoder::addSIGRdata(const rdata::Rdata& sig_rdata) {
isc_throw(RdataEncodingError, "RRSIG is too large: "
<< rrsig_datalen << " bytes");
}
+ impl_->rrsigs_.insert(rdatap);
impl_->rrsig_lengths_.push_back(rrsig_datalen);
+
+ return (true);
}
size_t
@@ -437,8 +591,11 @@ RdataEncoder::getStorageLength() const {
"RdataEncoder::getStorageLength performed before start");
}
- return (sizeof(uint16_t) * impl_->field_composer_.data_lengths_.size() +
- sizeof(uint16_t) * impl_->rrsig_lengths_.size() +
+ return (sizeof(uint16_t) * (impl_->old_varlen_count_ +
+ impl_->old_sig_count_ +
+ impl_->field_composer_.data_lengths_.size() +
+ impl_->rrsig_lengths_.size()) +
+ impl_->old_data_len_ + impl_->old_sig_len_ +
impl_->rrsig_buffer_.getLength() +
impl_->field_composer_.getLength());
}
@@ -461,6 +618,12 @@ RdataEncoder::encode(void* buf, size_t buf_len) const {
uint8_t* dp = dp_beg;
uint16_t* lenp = reinterpret_cast<uint16_t*>(buf);
+ // Encode list of lengths for variable length fields for old data (if any)
+ const size_t old_varlen_fields_len =
+ impl_->old_varlen_count_ * sizeof(uint16_t);
+ std::memcpy(lenp, impl_->old_length_fields_, old_varlen_fields_len);
+ lenp += impl_->old_varlen_count_;
+ dp += old_varlen_fields_len;
// Encode list of lengths for variable length fields (if any)
if (!impl_->field_composer_.data_lengths_.empty()) {
const size_t varlen_fields_len =
@@ -470,6 +633,12 @@ RdataEncoder::encode(void* buf, size_t buf_len) const {
lenp += impl_->field_composer_.data_lengths_.size();
dp += varlen_fields_len;
}
+ // Encode list of lengths for old RRSIGs (if any)
+ const size_t old_rrsigs_len = impl_->old_sig_count_ * sizeof(uint16_t);
+ std::memcpy(lenp, static_cast<const uint8_t*>(impl_->old_length_fields_) +
+ old_varlen_fields_len, old_rrsigs_len);
+ lenp += impl_->old_sig_count_;
+ dp += old_rrsigs_len;
// Encode list of lengths for RRSIGs (if any)
if (!impl_->rrsig_lengths_.empty()) {
const size_t rrsigs_len =
@@ -477,10 +646,17 @@ RdataEncoder::encode(void* buf, size_t buf_len) const {
std::memcpy(lenp, &impl_->rrsig_lengths_[0], rrsigs_len);
dp += rrsigs_len;
}
+ // Encode main old RDATA, if any
+ std::memcpy(dp, impl_->old_data_, impl_->old_data_len_);
+ dp += impl_->old_data_len_;
// Encode main RDATA
std::memcpy(dp, impl_->field_composer_.getData(),
impl_->field_composer_.getLength());
dp += impl_->field_composer_.getLength();
+ // Encode old RRSIGs, if any
+ std::memcpy(dp, static_cast<const uint8_t*>(impl_->old_data_) +
+ impl_->old_data_len_, impl_->old_sig_len_);
+ dp += impl_->old_sig_len_;
// Encode RRSIGs, if any
std::memcpy(dp, impl_->rrsig_buffer_.getData(),
impl_->rrsig_buffer_.getLength());
@@ -501,7 +677,7 @@ RdataReader::RdataReader(const RRClass& rrclass, const RRType& rrtype,
var_count_total_(spec_.varlen_count * rdata_count),
sig_count_(sig_count),
spec_count_(spec_.field_count * rdata_count),
- // The lenghts are stored first
+ // The lengths are stored first
lengths_(reinterpret_cast<const uint16_t*>(data)),
// And the data just after all the lengths
data_(reinterpret_cast<const uint8_t*>(data) +
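
The duplicate handling added above keeps each session's Rdata in a std::set whose ordering is delegated to Rdata::compare(), so a re-added RDATA is found in the set and skipped instead of being encoded twice. A small, self-contained sketch of that pattern (using plain strings in place of Rdata objects; the names are illustrative only):

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <set>
    #include <string>

    using ItemPtr = std::shared_ptr<const std::string>;

    // Order shared pointers by the values they point to, mirroring how the
    // patch orders ConstRdataPtr by Rdata::compare().
    bool itemLess(const ItemPtr& a, const ItemPtr& b) {
        return (*a < *b);
    }

    int main() {
        std::set<ItemPtr, std::function<bool(const ItemPtr&, const ItemPtr&)> >
            items(itemLess);
        items.insert(std::make_shared<const std::string>("192.0.2.1"));
        // A second, equal value compares equivalent and is not inserted.
        const bool added =
            items.insert(std::make_shared<const std::string>("192.0.2.1")).second;
        std::cout << "added=" << added << " size=" << items.size() << "\n";
        return 0;  // prints added=0 size=1
    }
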
diff --git a/src/lib/datasrc/memory/rdata_serialization.h b/src/lib/datasrc/memory/rdata_serialization.h
index 183276f..1582d48 100644
--- a/src/lib/datasrc/memory/rdata_serialization.h
+++ b/src/lib/datasrc/memory/rdata_serialization.h
@@ -25,8 +25,6 @@
#include <boost/function.hpp>
#include <boost/noncopyable.hpp>
-#include <vector>
-
/// \file rdata_serialization.h
///
/// This file defines a set of interfaces (classes, types, constants) to
@@ -157,6 +155,32 @@ public:
/// \param rrtype The RR type of RDATA to be encoded in the session.
void start(dns::RRClass rrclass, dns::RRType rrtype);
+ /// \brief Start the encoding session in the merge mode.
+ ///
+ /// This method is similar to the other version, but begins with a copy
+ /// of previously encoded data and merges Rdata and RRSIGs into it
+ /// that will be given via subsequent calls to \c addRdata() and
+ /// \c addSIGRdata(). \c old_data, \c old_rdata_count, and
+ /// \c old_sig_count correspond to parameters given to the
+ /// \c RdataReader constructor, and must have valid values for encoded
+ /// data by this class for the same \c rrclass and \c rrtype.
+ /// It's the caller's responsibility to ensure this condition; if it's
+ /// not met, the behavior will be undefined.
+ ///
+ /// The caller must also ensure that previously encoded data (pointed
+ /// to by \c old_data) will be valid and intact throughout the encoding
+ /// session started by this method.
+ ///
+ /// \param rrclass The RR class of RDATA to be encoded in the session.
+ /// \param rrtype The RR type of RDATA to be encoded in the session.
+ /// \param old_data Point to previously encoded data for the same RR
+ /// class and type.
+ /// \param old_rdata_count The number of RDATAs stored in \c old_data.
+ /// \param old_sig_count The number of RRSIGs stored in \c old_data.
+ void start(dns::RRClass rrclass, dns::RRType rrtype,
+ const void* old_data, size_t old_rdata_count,
+ size_t old_sig_count);
+
/// \brief Add an RDATA for encoding.
///
/// This method updates internal state of the \c RdataEncoder() with the
@@ -168,6 +192,14 @@ public:
/// to some extent, but the check is not complete; this is generally
/// the responsibility of the caller.
///
+ /// This method checks if the given RDATA is a duplicate of already
+ /// added one (including ones encoded in the old data if the session
+ /// began with the merge mode). If it's a duplicate this method ignores
+ /// the given RDATA and returns false; otherwise it returns true.
+ /// The check is based on the comparison in the "canonical form" as
+ /// described in RFC4034 Section 6.2. In particular, domain name fields
+ /// of the RDATA are generally compared in case-insensitive manner.
+ ///
/// The caller can destroy \c rdata after this call is completed.
///
/// \note This implementation does not support RDATA (or any subfield of
@@ -183,12 +215,14 @@ public:
/// new session from \c start() should this method throws an exception.
///
/// \throw InvalidOperation called before start().
- /// \throw BadValue inconsistent data found.
+ /// \throw std::bad_cast The given Rdata is of different RR type.
/// \throw RdataEncodingError A very unusual case, such as over 64KB RDATA.
/// \throw std::bad_alloc Internal memory allocation failure.
///
/// \param rdata An RDATA to be encoded in the session.
- void addRdata(const dns::rdata::Rdata& rdata);
+ /// \return true if the given RDATA was added to encode; false if
+ /// it's a duplicate and ignored.
+ bool addRdata(const dns::rdata::Rdata& rdata);
/// \brief Add an RRSIG RDATA for encoding.
///
@@ -204,6 +238,13 @@ public:
/// it could even accept any type of RDATA as opaque data. It's caller's
/// responsibility to ensure the assumption.
///
+ /// This method checks if the given RRSIG RDATA is a duplicate of already
+ /// added one (including ones encoded in the old data if the session
+ /// began with the merge mode). If it's a duplicate this method ignores
+ /// the given RRSIG and returns false; otherwise it returns true.
+ /// The check is based on the comparison in the "canonical form" as
+ /// described in RFC4034 Section 6.2.
+ ///
/// The caller can destroy \c rdata after this call is completed.
///
/// \note Like addRdata(), this implementation does not support
@@ -218,7 +259,9 @@ public:
///
/// \param sig_rdata An RDATA to be encoded in the session. Supposed to
/// be of type RRSIG.
- void addSIGRdata(const dns::rdata::Rdata& sig_rdata);
+ /// \return true if the given RRSIG RDATA was added to encode; false if
+ /// it's a duplicate and ignored.
+ bool addSIGRdata(const dns::rdata::Rdata& sig_rdata);
/// \brief Return the length of space for encoding for the session.
///
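
Putting the new interface together, a caller merging additional RDATA into a previously encoded image would look roughly like the sketch below. This is illustrative only, not code from the patch: the function name and parameters are hypothetical, error handling is omitted, and old_image/old_rdata_count/old_sig_count are assumed to describe data previously produced by the same encoder for the same class and type.

    #include <datasrc/memory/rdata_serialization.h>
    #include <dns/rdata.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <stdint.h>
    #include <vector>

    using namespace isc::datasrc::memory;
    using namespace isc::dns;

    // Sketch: merge one more RDATA into an existing encoded image.
    std::vector<uint8_t>
    mergeOneRdata(RdataEncoder& encoder, const void* old_image,
                  size_t old_rdata_count, size_t old_sig_count,
                  const rdata::Rdata& new_rdata)
    {
        // Begin a merge-mode session seeded with the previously encoded data.
        encoder.start(RRClass::IN(), RRType::NS(),
                      old_image, old_rdata_count, old_sig_count);

        // addRdata() now reports whether the RDATA was really added; a
        // duplicate of something already in old_image returns false.
        encoder.addRdata(new_rdata);

        // Allocate space for old + new data and write the merged image.
        std::vector<uint8_t> buf(encoder.getStorageLength());
        encoder.encode(&buf[0], buf.size());
        return (buf);
    }
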
diff --git a/src/lib/datasrc/memory/rdataset.cc b/src/lib/datasrc/memory/rdataset.cc
index e7a070f..3841c03 100644
--- a/src/lib/datasrc/memory/rdataset.cc
+++ b/src/lib/datasrc/memory/rdataset.cc
@@ -26,6 +26,7 @@
#include <boost/static_assert.hpp>
#include <stdint.h>
+#include <algorithm>
#include <cstring>
#include <typeinfo> // for bad_cast
#include <new> // for the placement new
@@ -48,11 +49,39 @@ getCoveredType(const Rdata& rdata) {
isc_throw(BadValue, "Non RRSIG is given where it's expected");
}
}
+
+// A helper for lowestTTL: restore RRTTL object from wire-format 32-bit data.
+RRTTL
+restoreTTL(const void* ttl_data) {
+ isc::util::InputBuffer b(ttl_data, sizeof(uint32_t));
+ return (RRTTL(b));
+}
+
+// A helper function for create(): return the TTL that has lowest value
+// amount the given those of given rdataset (if non NULL), rrset, sig_rrset.
+RRTTL
+lowestTTL(const RdataSet* rdataset, ConstRRsetPtr& rrset,
+ ConstRRsetPtr& sig_rrset)
+{
+ if (rrset && sig_rrset) {
+ const RRTTL tmp(std::min(rrset->getTTL(), sig_rrset->getTTL()));
+ return (rdataset ?
+ std::min(restoreTTL(rdataset->getTTLData()), tmp) : tmp);
+ } else if (rrset) {
+ return (rdataset ? std::min(restoreTTL(rdataset->getTTLData()),
+ rrset->getTTL()) : rrset->getTTL());
+ } else {
+ return (rdataset ? std::min(restoreTTL(rdataset->getTTLData()),
+ sig_rrset->getTTL()) :
+ sig_rrset->getTTL());
+ }
+}
}
RdataSet*
RdataSet::create(util::MemorySegment& mem_sgmt, RdataEncoder& encoder,
- ConstRRsetPtr rrset, ConstRRsetPtr sig_rrset)
+ ConstRRsetPtr rrset, ConstRRsetPtr sig_rrset,
+ const RdataSet* old_rdataset)
{
// Check basic validity
if (!rrset && !sig_rrset) {
@@ -68,31 +97,40 @@ RdataSet::create(util::MemorySegment& mem_sgmt, RdataEncoder& encoder,
isc_throw(BadValue, "RR class doesn't match between RRset and RRSIG");
}
- // Check assumptions on the number of RDATAs
- if (rrset && rrset->getRdataCount() > MAX_RDATA_COUNT) {
- isc_throw(RdataSetError, "Too many RDATAs for RdataSet: "
- << rrset->getRdataCount() << ", must be <= "
- << MAX_RDATA_COUNT);
- }
- if (sig_rrset && sig_rrset->getRdataCount() > MAX_RRSIG_COUNT) {
- isc_throw(RdataSetError, "Too many RRSIGs for RdataSet: "
- << sig_rrset->getRdataCount() << ", must be <= "
- << MAX_RRSIG_COUNT);
- }
-
const RRClass rrclass = rrset ? rrset->getClass() : sig_rrset->getClass();
const RRType rrtype = rrset ? rrset->getType() :
getCoveredType(sig_rrset->getRdataIterator()->getCurrent());
- const RRTTL rrttl = rrset ? rrset->getTTL() : sig_rrset->getTTL();
+ if (old_rdataset && old_rdataset->type != rrtype) {
+ isc_throw(BadValue, "RR type doesn't match for merging RdataSet");
+ }
+ const RRTTL rrttl = lowestTTL(old_rdataset, rrset, sig_rrset);
+ if (old_rdataset) {
+ encoder.start(rrclass, rrtype, old_rdataset->getDataBuf(),
+ old_rdataset->getRdataCount(),
+ old_rdataset->getSigRdataCount());
+ } else {
+ encoder.start(rrclass, rrtype);
+ }
- encoder.start(rrclass, rrtype);
+ // Store RDATAs to be added and check assumptions on the number of them
+ size_t rdata_count = old_rdataset ? old_rdataset->getRdataCount() : 0;
if (rrset) {
for (RdataIteratorPtr it = rrset->getRdataIterator();
!it->isLast();
it->next()) {
- encoder.addRdata(it->getCurrent());
+ if (encoder.addRdata(it->getCurrent())) {
+ ++rdata_count;
+ }
}
}
+ if (rdata_count > MAX_RDATA_COUNT) {
+ isc_throw(RdataSetError, "Too many RDATAs for RdataSet: "
+ << rrset->getRdataCount() << ", must be <= "
+ << MAX_RDATA_COUNT);
+ }
+
+ // Same for RRSIG
+ size_t rrsig_count = old_rdataset ? old_rdataset->getSigRdataCount() : 0;
if (sig_rrset) {
for (RdataIteratorPtr it = sig_rrset->getRdataIterator();
!it->isLast();
@@ -101,19 +139,24 @@ RdataSet::create(util::MemorySegment& mem_sgmt, RdataEncoder& encoder,
if (getCoveredType(it->getCurrent()) != rrtype) {
isc_throw(BadValue, "Type covered doesn't match");
}
- encoder.addSIGRdata(it->getCurrent());
+ if (encoder.addSIGRdata(it->getCurrent())) {
+ ++rrsig_count;
+ }
}
}
+ if (rrsig_count > MAX_RRSIG_COUNT) {
+ isc_throw(RdataSetError, "Too many RRSIGs for RdataSet: "
+ << sig_rrset->getRdataCount() << ", must be <= "
+ << MAX_RRSIG_COUNT);
+ }
- const size_t rrsig_count = sig_rrset ? sig_rrset->getRdataCount() : 0;
const size_t ext_rrsig_count_len =
rrsig_count >= MANY_RRSIG_COUNT ? sizeof(uint16_t) : 0;
const size_t data_len = encoder.getStorageLength();
void* p = mem_sgmt.allocate(sizeof(RdataSet) + ext_rrsig_count_len +
data_len);
- RdataSet* rdataset = new(p) RdataSet(rrtype,
- rrset ? rrset->getRdataCount() : 0,
- rrsig_count, rrttl);
+ RdataSet* rdataset = new(p) RdataSet(rrtype, rdata_count, rrsig_count,
+ rrttl);
if (rrsig_count >= MANY_RRSIG_COUNT) {
*rdataset->getExtSIGCountBuf() = rrsig_count;
}
diff --git a/src/lib/datasrc/memory/rdataset.h b/src/lib/datasrc/memory/rdataset.h
index ffa5075..250af93 100644
--- a/src/lib/datasrc/memory/rdataset.h
+++ b/src/lib/datasrc/memory/rdataset.h
@@ -120,6 +120,27 @@ public:
/// RRSIG from the given memory segment, constructs the object, and
/// returns a pointer to it.
///
+ /// If the optional \c old_rdataset parameter is set to non NULL,
+ /// the given \c RdataSet, RRset, and RRSIG will be merged into the new
+ /// \c RdataSet object: the new object contains the union of all
+ /// RDATA and RRSIGs contained in these. \c old_rdataset must have been
+ /// previously generated for the same RRClass and RRType as those of
+ /// the given RRsets; it's the caller's responsibility to ensure
+ /// this condition. If it's not met, the result will be undefined.
+ ///
+ /// In both cases, this method ensures the stored RDATA and RRSIG are
+ /// unique. Any duplicate data (in the sense of comparison in the
+ /// canonical form of RRs as described in RFC4034) within RRset or
+ /// RRSIG, or between data in \c old_rdataset and RRset/RRSIG will be
+ /// unified.
+ ///
+ /// In general, the TTLs of the given data are expected to be the same.
+ /// This is especially the case if the zone is signed (and RRSIG is given).
+ /// However, if different TTLs are found among the given data, this
+ /// method chooses the lowest one for the TTL of the resulting
+ /// \c RdataSet. This is an implementation choice, but should be most
+ /// compliant to the sense of Section 5.2 of RFC2181.
+ ///
/// Normally the (non RRSIG) RRset is given (\c rrset is not NULL) while
/// its RRSIG (\c sig_rrset) may or may not be provided. But it's also
/// expected that in some rare (mostly broken) cases there can be an RRSIG
@@ -148,9 +169,9 @@ public:
/// happens.
///
/// Due to implementation limitations, this class cannot contain more than
- /// 8191 RDATAs for the non RRISG RRset; also, it cannot contain more than
- /// 65535 RRSIGs. If the given RRset(s) fail to meet this condition,
- /// an \c RdataSetError exception will be thrown.
+ /// 8191 RDATAs (after unifying duplicates) for the non RRSIG RRset; also,
+ /// it cannot contain more than 65535 RRSIGs. If the given RRset(s) fail
+ /// to meet this condition, an \c RdataSetError exception will be thrown.
///
/// \throw isc::BadValue Given RRset(s) are invalid (see the description)
/// \throw RdataSetError Number of RDATAs exceed the limits
@@ -164,12 +185,15 @@ public:
/// created. Can be NULL if sig_rrset is not.
/// \param sig_rrset An RRSIG RRset from which the \c RdataSet is to be
/// created. Can be NULL if rrset is not.
+ /// \param old_rdataset If non NULL, create RdataSet merging old_rdataset
+ /// into given rrset and sig_rrset.
///
/// \return A pointer to the created \c RdataSet.
static RdataSet* create(util::MemorySegment& mem_sgmt,
RdataEncoder& encoder,
dns::ConstRRsetPtr rrset,
- dns::ConstRRsetPtr sig_rrset);
+ dns::ConstRRsetPtr sig_rrset,
+ const RdataSet* old_rdataset = NULL);
/// \brief Destruct and deallocate \c RdataSet
///
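
The TTL rule documented above (take the lowest TTL among the merged inputs, in the spirit of RFC 2181 Section 5.2) reduces to std::min once all candidates are available as RRTTL objects. A minimal sketch, assuming all three sources are present (the function name is illustrative, not from the patch):

    #include <algorithm>
    #include <dns/rrttl.h>

    using isc::dns::RRTTL;

    // Pick the TTL for the merged RdataSet: the smallest of the old set's TTL
    // and the TTLs of the new RRset and its RRSIG.
    RRTTL
    mergedTTL(const RRTTL& old_ttl, const RRTTL& rrset_ttl, const RRTTL& sig_ttl) {
        return (std::min(old_ttl, std::min(rrset_ttl, sig_ttl)));
    }
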
diff --git a/src/lib/datasrc/memory/treenode_rrset.cc b/src/lib/datasrc/memory/treenode_rrset.cc
index e7ed20c..c4e16a6 100644
--- a/src/lib/datasrc/memory/treenode_rrset.cc
+++ b/src/lib/datasrc/memory/treenode_rrset.cc
@@ -59,8 +59,7 @@ TreeNodeRRset::getName() const {
const RRTTL&
TreeNodeRRset::getTTL() const {
if (ttl_ == NULL) {
- util::InputBuffer ttl_buffer(rdataset_->getTTLData(),
- sizeof(uint32_t));
+ util::InputBuffer ttl_buffer(ttl_data_, sizeof(uint32_t));
ttl_ = new RRTTL(ttl_buffer);
}
@@ -169,7 +168,7 @@ TreeNodeRRset::toWire(AbstractMessageRenderer& renderer) const {
// Render the main (non RRSIG) RRs
const size_t rendered_rdata_count =
writeRRs(renderer, rdataset_->getRdataCount(), name_labels,
- rdataset_->type, rrclass_, rdataset_->getTTLData(), reader,
+ rdataset_->type, rrclass_, ttl_data_, reader,
&RdataReader::iterateRdata);
if (renderer.isTruncated()) {
return (rendered_rdata_count);
@@ -180,7 +179,7 @@ TreeNodeRRset::toWire(AbstractMessageRenderer& renderer) const {
// Render any RRSIGs, if we supposed to do so
const size_t rendered_rrsig_count = dnssec_ok_ ?
writeRRs(renderer, rrsig_count_, name_labels, RRType::RRSIG(),
- rrclass_, rdataset_->getTTLData(), reader,
+ rrclass_, ttl_data_, reader,
&RdataReader::iterateSingleSig) : 0;
return (rendered_rdata_count + rendered_rrsig_count);
diff --git a/src/lib/datasrc/memory/treenode_rrset.h b/src/lib/datasrc/memory/treenode_rrset.h
index 460a704..295a95a 100644
--- a/src/lib/datasrc/memory/treenode_rrset.h
+++ b/src/lib/datasrc/memory/treenode_rrset.h
@@ -112,12 +112,34 @@ public:
const RdataSet* rdataset, bool dnssec_ok) :
node_(node), rdataset_(rdataset),
rrsig_count_(rdataset_->getSigRdataCount()), rrclass_(rrclass),
- dnssec_ok_(dnssec_ok), name_(NULL), realname_(NULL), ttl_(NULL)
+ dnssec_ok_(dnssec_ok), name_(NULL), realname_(NULL),
+ ttl_data_(rdataset->getTTLData()), ttl_(NULL)
+ {}
+
+ /// \brief Constructor with a specific TTL.
+ ///
+ /// This constructor is mostly the same as the normal version, but takes
+ /// an extra parameter, \c ttl_data. It's expected to point to a memory
+ /// region at least for 32 bits, and the corresponding 32-bit data will
+ /// be used as wire-format TTL value of the RRset, instead of the TTL
+ /// associated with \c rdataset.
+ ///
+ /// It's the caller's responsibility to guarantee the memory region is
+ /// valid and intact throughout the lifetime of the RRset.
+ ///
+ /// \throw None
+ TreeNodeRRset(const dns::RRClass& rrclass, const ZoneNode* node,
+ const RdataSet* rdataset, bool dnssec_ok,
+ const void* ttl_data) :
+ node_(node), rdataset_(rdataset),
+ rrsig_count_(rdataset_->getSigRdataCount()), rrclass_(rrclass),
+ dnssec_ok_(dnssec_ok), name_(NULL), realname_(NULL),
+ ttl_data_(ttl_data), ttl_(NULL)
{}
/// \brief Constructor for wildcard-expanded owner name.
///
- /// This constructor is mostly the same as the other version, but takes
+ /// This constructor is mostly the same as the normal version, but takes
/// an extra parameter, \c realname. It effectively overrides the owner
/// name of the RRset; wherever the owner name is used (e.g., in the
/// \c toWire() method), the specified name will be used instead of
@@ -133,7 +155,7 @@ public:
node_(node), rdataset_(rdataset),
rrsig_count_(rdataset_->getSigRdataCount()), rrclass_(rrclass),
dnssec_ok_(dnssec_ok), name_(NULL), realname_(new dns::Name(realname)),
- ttl_(NULL)
+ ttl_data_(rdataset->getTTLData()), ttl_(NULL)
{}
virtual ~TreeNodeRRset() {
@@ -255,6 +277,7 @@ private:
const bool dnssec_ok_;
mutable dns::Name* name_;
const dns::Name* const realname_;
+ const void* const ttl_data_;
mutable dns::RRTTL* ttl_;
};
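
The new constructor lets a caller substitute any 32-bit wire-format TTL for the one stored with the RdataSet; the immediate client is the min-TTL handling in the in-memory zone finder further below. A sketch of such a call, assuming node, rdataset and zone_data are valid objects obtained elsewhere (the wrapper function itself is hypothetical):

    #include <datasrc/memory/treenode_rrset.h>
    #include <datasrc/memory/zone_data.h>
    #include <dns/rrclass.h>

    using namespace isc::datasrc::memory;

    // Build an RRset whose TTL is the zone's recorded minimum TTL rather than
    // the TTL stored with the RdataSet itself.
    TreeNodeRRsetPtr
    makeMinTTLRRset(const ZoneNode* node, const RdataSet* rdataset,
                    const ZoneData& zone_data, bool dnssec_ok)
    {
        return (TreeNodeRRsetPtr(
            new TreeNodeRRset(isc::dns::RRClass::IN(), node, rdataset,
                              dnssec_ok, zone_data.getMinTTLData())));
    }
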
diff --git a/src/lib/datasrc/memory/zone_data.cc b/src/lib/datasrc/memory/zone_data.cc
index cc31419..d32fc87 100644
--- a/src/lib/datasrc/memory/zone_data.cc
+++ b/src/lib/datasrc/memory/zone_data.cc
@@ -134,6 +134,31 @@ NSEC3Data::insertName(util::MemorySegment& mem_sgmt, const Name& name,
result == ZoneTree::ALREADYEXISTS) && node != NULL);
}
+namespace {
+// A helper to convert a TTL value in network byte order and set it in
+// ZoneData::min_ttl_. We can use util::OutputBuffer, but copy the logic
+// here to guarantee it is exception free.
+// Note: essentially this function is a local (re)implementation of the
+// standard htonl() library function, but we avoid relying on it in case it's
+// not available (it's not in the C++ standard library).
+void
+setTTLInNetOrder(uint32_t val, uint32_t* result) {
+ uint8_t buf[4];
+ buf[0] = static_cast<uint8_t>((val & 0xff000000) >> 24);
+ buf[1] = static_cast<uint8_t>((val & 0x00ff0000) >> 16);
+ buf[2] = static_cast<uint8_t>((val & 0x0000ff00) >> 8);
+ buf[3] = static_cast<uint8_t>(val & 0x000000ff);
+ std::memcpy(result, buf, sizeof(*result));
+}
+}
+
+ZoneData::ZoneData(ZoneTree* zone_tree, ZoneNode* origin_node) :
+ zone_tree_(zone_tree), origin_node_(origin_node),
+ min_ttl_(0) // tentatively set to silence static checkers
+{
+ setTTLInNetOrder(RRTTL::MAX_TTL().getValue(), &min_ttl_);
+}
+
ZoneData*
ZoneData::create(util::MemorySegment& mem_sgmt, const Name& zone_origin) {
// ZoneTree::insert() and ZoneData allocation can throw. See also
@@ -178,6 +203,11 @@ ZoneData::insertName(util::MemorySegment& mem_sgmt, const Name& name,
result == ZoneTree::ALREADYEXISTS) && node != NULL);
}
+void
+ZoneData::setMinTTL(uint32_t min_ttl_val) {
+ setTTLInNetOrder(min_ttl_val, &min_ttl_);
+}
+
} // namespace memory
} // namespace datasrc
} // datasrc isc
diff --git a/src/lib/datasrc/memory/zone_data.h b/src/lib/datasrc/memory/zone_data.h
index 974ce24..c6b3dcc 100644
--- a/src/lib/datasrc/memory/zone_data.h
+++ b/src/lib/datasrc/memory/zone_data.h
@@ -287,7 +287,7 @@ private:
/// from NSEC to NSEC3 or vice versa, support incremental signing, or support
/// multiple sets of NSEC3 parameters.
///
-/// One last type of meta data is the status of the zone in terms of DNSSEC
+/// One other type of meta data is the status of the zone in terms of DNSSEC
/// signing. This class supports the following concepts:
/// - Whether the zone is signed or not, either with NSEC records or NSEC3
/// records.
@@ -315,6 +315,15 @@ private:
/// because we won't have to change the application code when we implement
/// the future separation.
///
+/// One last type of meta data is the zone's "minimum" TTL. It's expected
+/// to be a shortcut copy of the minimum field of the zone's SOA RDATA,
+/// and is expected to be used to create an SOA RR for a negative response,
+/// whose RR TTL may have to be set to this value according to RFC2308.
+/// This class is not aware of such usage, however, and only provides a
+/// simple getter and setter method for this value: \c getMinTTLData() and
+/// \c setMinTTL(). The user of this class is responsible for setting the
+/// value with \c setMinTTL() when it loads or updates the SOA RR.
+///
/// The intended usage of these two status concepts is to implement the
/// \c ZoneFinder::Context::isNSECSigned() and
/// \c ZoneFinder::Context::isNSEC3Signed() methods. A possible implementation
@@ -349,9 +358,7 @@ private:
/// allocator (\c create()), so the constructor is hidden as private.
///
/// It never throws an exception.
- ZoneData(ZoneTree* zone_tree, ZoneNode* origin_node) :
- zone_tree_(zone_tree), origin_node_(origin_node)
- {}
+ ZoneData(ZoneTree* zone_tree, ZoneNode* origin_node);
// Zone node flags.
private:
@@ -413,7 +420,7 @@ public:
///
/// The class encapsulation ensures that the origin node always exists at
/// the same address, so this method always returns a non-NULL valid
- /// valid pointer.
+ /// pointer.
///
/// \throw none
const ZoneNode* getOriginNode() const {
@@ -456,6 +463,26 @@ public:
///
/// \throw none
const NSEC3Data* getNSEC3Data() const { return (nsec3_data_.get()); }
+
+ /// \brief Return a pointer to the zone's minimum TTL data.
+ ///
+ /// The returned pointer points to a memory region that is valid at least
+ /// for 32 bits, storing the zone's minimum TTL in the network byte
+ /// order. The corresponding 32-bit value as an integer is initially
+ /// set to the value of \c dns::RRTTL::MAX_TTL(), and, once
+ /// \c setMinTTL() is called, set to the value specified at the latest
+ /// call to \c setMinTTL().
+ ///
+ /// It returns opaque data to make it clear that unless the wire
+ /// format data is necessary (e.g., when rendering it in a DNS message),
+ /// it should be converted to, e.g., an \c RRTTL object explicitly.
+ ///
+ /// The returned pointer is valid as long as the \c ZoneData is valid,
+ /// and the corresponding 32-bit data are the same until \c setMinTTL()
+ /// is called.
+ ///
+ /// \throw none
+ const void* getMinTTLData() const { return (&min_ttl_); }
//@}
///
@@ -552,12 +579,32 @@ public:
nsec3_data_ = nsec3_data;
return (old);
}
+
+ /// \brief Set the zone's "minimum" TTL.
+ ///
+ /// This method updates the recorded minimum TTL of the zone data.
+ /// It's expected to be identical to the value of the Minimum field
+ /// of the SOA RR at the zone apex, but this method does not check the
+ /// consistency; it's the caller's responsibility.
+ ///
+ /// While RFC2181 specifies the max TTL value to be 2^31-1, this method
+ /// does not check the range; it accepts any unsigned 32-bit integer
+ /// value. In practice, this shouldn't cause a problem, however, because
+ /// the only expected usage of this value is to use the minimum of this
+ /// value and SOA RR's TTL, and the latter is expected to be in the
+ /// valid range.
+ ///
+ /// \throw None
+ /// \param min_ttl_val The minimum TTL value as unsigned 32-bit integer
+ /// in the host byte order.
+ void setMinTTL(uint32_t min_ttl_val);
//@}
private:
const boost::interprocess::offset_ptr<ZoneTree> zone_tree_;
const boost::interprocess::offset_ptr<ZoneNode> origin_node_;
boost::interprocess::offset_ptr<NSEC3Data> nsec3_data_;
+ uint32_t min_ttl_;
};
} // namespace memory
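
As documented above, the stored minimum TTL is opaque wire-format data; to treat it as a TTL again it has to go back through an InputBuffer. A small sketch of the set/get round trip, assuming zone_data is a valid ZoneData object and soa_minimum is the Minimum field taken from the zone's SOA RDATA (the helper function itself is illustrative only):

    #include <datasrc/memory/zone_data.h>
    #include <dns/rrttl.h>
    #include <util/buffer.h>
    #include <stdint.h>

    using namespace isc::datasrc::memory;

    isc::dns::RRTTL
    recordAndReadMinTTL(ZoneData& zone_data, uint32_t soa_minimum) {
        // Record the SOA Minimum (host byte order in, network byte order stored).
        zone_data.setMinTTL(soa_minimum);

        // Read it back as an RRTTL object from the 32-bit wire-format data.
        isc::util::InputBuffer b(zone_data.getMinTTLData(), sizeof(uint32_t));
        return (isc::dns::RRTTL(b));
    }
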
diff --git a/src/lib/datasrc/memory/zone_data_loader.h b/src/lib/datasrc/memory/zone_data_loader.h
index db7ac3b..32ed58b 100644
--- a/src/lib/datasrc/memory/zone_data_loader.h
+++ b/src/lib/datasrc/memory/zone_data_loader.h
@@ -17,7 +17,7 @@
#include <datasrc/exceptions.h>
#include <datasrc/memory/zone_data.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <dns/name.h>
#include <dns/rrclass.h>
#include <util/memory_segment.h>
diff --git a/src/lib/datasrc/memory/zone_data_updater.cc b/src/lib/datasrc/memory/zone_data_updater.cc
index 51ec03c..5bde6d4 100644
--- a/src/lib/datasrc/memory/zone_data_updater.cc
+++ b/src/lib/datasrc/memory/zone_data_updater.cc
@@ -336,6 +336,17 @@ ZoneDataUpdater::addRdataSet(const Name& name, const RRType& rrtype,
// "NSEC signed")
zone_data_.setSigned(true);
}
+
+ // If we are adding a new SOA at the origin, update zone's min TTL.
+ // Note: if the input is broken and contains multiple SOAs, the load
+ // or update will be rejected at a higher level. We just always (though
+ // this should be only once in normal cases) update the TTL.
+ if (rrset && rrtype == RRType::SOA() && is_origin) {
+ // Our own validation ensures the RRset is not empty.
+ zone_data_.setMinTTL(
+ dynamic_cast<const generic::SOA&>(
+ rrset->getRdataIterator()->getCurrent()).getMinimum());
+ }
}
}
diff --git a/src/lib/datasrc/memory/zone_finder.cc b/src/lib/datasrc/memory/zone_finder.cc
index 7f57d8e..56c4110 100644
--- a/src/lib/datasrc/memory/zone_finder.cc
+++ b/src/lib/datasrc/memory/zone_finder.cc
@@ -17,16 +17,19 @@
#include <datasrc/memory/treenode_rrset.h>
#include <datasrc/memory/rdata_serialization.h>
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <datasrc/data_source.h>
#include <dns/labelsequence.h>
#include <dns/name.h>
#include <dns/rrset.h>
#include <dns/rrtype.h>
+#include <dns/rrttl.h>
#include <dns/nsec3hash.h>
#include <datasrc/logger.h>
+#include <util/buffer.h>
+
#include <boost/scoped_ptr.hpp>
#include <boost/bind.hpp>
@@ -104,14 +107,19 @@ createTreeNodeRRset(const ZoneNode* node,
const RdataSet* rdataset,
const RRClass& rrclass,
ZoneFinder::FindOptions options,
- const Name* realname = NULL)
+ const Name* realname = NULL,
+ const void* ttl_data = NULL)
{
const bool dnssec = ((options & ZoneFinder::FIND_DNSSEC) != 0);
- if (node != NULL && rdataset != NULL) {
- if (realname != NULL) {
+ if (node && rdataset) {
+ if (realname) {
return (TreeNodeRRsetPtr(new TreeNodeRRset(*realname, rrclass,
node, rdataset,
dnssec)));
+ } else if (ttl_data) {
+ assert(!realname); // these two cases should not be mixed in our use
+ return (TreeNodeRRsetPtr(new TreeNodeRRset(rrclass, node, rdataset,
+ dnssec, ttl_data)));
} else {
return (TreeNodeRRsetPtr(new TreeNodeRRset(rrclass, node, rdataset,
dnssec)));
@@ -229,6 +237,12 @@ createNSEC3RRset(const ZoneNode* node, const RRClass& rrclass) {
ZoneFinder::FIND_DNSSEC));
}
+inline RRTTL
+createTTLFromData(const void* ttl_data) {
+ util::InputBuffer b(ttl_data, sizeof(uint32_t));
+ return (RRTTL(b));
+}
+
// convenience function to fill in the final details
//
// Set up ZoneFinderResultContext object as a return value of find(),
@@ -250,7 +264,8 @@ createFindResult(const RRClass& rrclass,
const RdataSet* rdataset,
ZoneFinder::FindOptions options,
bool wild = false,
- const Name* qname = NULL)
+ const Name* qname = NULL,
+ bool use_minttl = false)
{
ZoneFinder::FindResultFlags flags = ZoneFinder::RESULT_DEFAULT;
const Name* rename = NULL;
@@ -268,6 +283,15 @@ createFindResult(const RRClass& rrclass,
}
}
+ if (use_minttl && rdataset &&
+ createTTLFromData(zone_data.getMinTTLData()) <
+ createTTLFromData(rdataset->getTTLData())) {
+ return (ZoneFinderResultContext(
+ code,
+ createTreeNodeRRset(node, rdataset, rrclass, options,
+ rename, zone_data.getMinTTLData()),
+ flags, zone_data, node, rdataset));
+ }
return (ZoneFinderResultContext(code, createTreeNodeRRset(node, rdataset,
rrclass, options,
rename),
@@ -721,8 +745,8 @@ InMemoryZoneFinder::Context::findAdditional(
boost::shared_ptr<ZoneFinder::Context>
InMemoryZoneFinder::find(const isc::dns::Name& name,
- const isc::dns::RRType& type,
- const FindOptions options)
+ const isc::dns::RRType& type,
+ const FindOptions options)
{
return (ZoneFinderContextPtr(new Context(*this, options, rrclass_,
findInternal(name, type,
@@ -731,8 +755,8 @@ InMemoryZoneFinder::find(const isc::dns::Name& name,
boost::shared_ptr<ZoneFinder::Context>
InMemoryZoneFinder::findAll(const isc::dns::Name& name,
- std::vector<isc::dns::ConstRRsetPtr>& target,
- const FindOptions options)
+ std::vector<isc::dns::ConstRRsetPtr>& target,
+ const FindOptions options)
{
return (ZoneFinderContextPtr(new Context(*this, options, rrclass_,
findInternal(name,
@@ -741,6 +765,44 @@ InMemoryZoneFinder::findAll(const isc::dns::Name& name,
options))));
}
+// The implementation is a special case of the generic findInternal: we know
+// the qname should have an "exact match" and its node is accessible via
+// getOriginNode(); and, since there should be at least an SOA RR at the origin,
+// the case of CNAME can be eliminated (these should be guaranteed at load
+// or update time, but even if they miss a corner case and allow a CNAME to
+// be added at the origin, the zone is broken anyway, so we'd just let this
+// method return garbage, too). As a result, there can be only two cases
+// for the result codes: SUCCESS if the requested type of RR exists; NXRRSET
+// otherwise. Due to its simplicity we implement it separately, rather than
+// sharing the code with findInternal.
+boost::shared_ptr<ZoneFinder::Context>
+InMemoryZoneFinder::findAtOrigin(const isc::dns::RRType& type,
+ bool use_minttl,
+ const FindOptions options)
+{
+ const ZoneNode* const node = zone_data_.getOriginNode();
+ const RdataSet* const found = RdataSet::find(node->getData(), type);
+
+ if (found != NULL) {
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_TYPE_AT_ORIGIN).
+ arg(type).arg(getOrigin()).arg(rrclass_);
+ return (ZoneFinderContextPtr(
+ new Context(*this, options, rrclass_,
+ createFindResult(rrclass_, zone_data_, SUCCESS,
+ node, found, options, false,
+ NULL, use_minttl))));
+ }
+ return (ZoneFinderContextPtr(
+ new Context(*this, options, rrclass_,
+ createFindResult(rrclass_, zone_data_, NXRRSET,
+ node,
+ getNSECForNXRRSET(zone_data_,
+ options,
+ node),
+ options, false, NULL,
+ use_minttl))));
+}
+
ZoneFinderResultContext
InMemoryZoneFinder::findInternal(const isc::dns::Name& name,
const isc::dns::RRType& type,
diff --git a/src/lib/datasrc/memory/zone_finder.h b/src/lib/datasrc/memory/zone_finder.h
index c95b5bc..f4f411a 100644
--- a/src/lib/datasrc/memory/zone_finder.h
+++ b/src/lib/datasrc/memory/zone_finder.h
@@ -18,7 +18,7 @@
#include <datasrc/memory/zone_data.h>
#include <datasrc/memory/treenode_rrset.h>
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <dns/name.h>
#include <dns/rrset.h>
#include <dns/rrtype.h>
@@ -60,6 +60,16 @@ public:
const isc::dns::RRType& type,
const FindOptions options = FIND_DEFAULT);
+ /// \brief Search for an RRset of given RR type at the zone origin
+ /// specialized for in-memory data source.
+ ///
+ /// This specialized version exploits internal data structure to find
+ /// RRsets at the zone origin and (if \c use_minttl is true) extract
+ /// the SOA Minimum TTL much more efficiently.
+ virtual boost::shared_ptr<ZoneFinder::Context> findAtOrigin(
+ const isc::dns::RRType& type, bool use_minttl,
+ FindOptions options);
+
/// \brief Version of find that returns all types at once
///
/// It acts the same as find, just that when the correct node is found,
@@ -108,3 +118,7 @@ private:
} // namespace isc
#endif // DATASRC_MEMORY_ZONE_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
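
The typical consumer of findAtOrigin() is query processing that needs the zone's SOA for a negative answer: with use_minttl set, the returned RRset already carries the RFC 2308 TTL (the smaller of the SOA TTL and its Minimum field). A sketch of that call, assuming finder is an InMemoryZoneFinder for the zone being answered from and that the result is read through the usual ZoneFinder::Context members (code, rrset); the helper function itself is hypothetical:

    #include <datasrc/memory/zone_finder.h>
    #include <dns/rrset.h>
    #include <dns/rrtype.h>
    #include <boost/shared_ptr.hpp>

    using isc::datasrc::ZoneFinder;
    using isc::datasrc::memory::InMemoryZoneFinder;

    // Fetch the zone's SOA for a negative answer; with use_minttl=true the
    // returned RRset's TTL is already capped by the SOA Minimum field.
    isc::dns::ConstRRsetPtr
    soaForNegativeAnswer(InMemoryZoneFinder& finder) {
        boost::shared_ptr<ZoneFinder::Context> ctx =
            finder.findAtOrigin(isc::dns::RRType::SOA(), true,
                                ZoneFinder::FIND_DEFAULT);
        return (ctx->code == ZoneFinder::SUCCESS ? ctx->rrset
                                                 : isc::dns::ConstRRsetPtr());
    }
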
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 686dd94..45d7920 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -26,9 +26,10 @@
#include <datasrc/rbtree.h>
#include <datasrc/rbnode_rrset.h>
#include <datasrc/logger.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/data_source.h>
#include <datasrc/factory.h>
+#include <datasrc/zone_finder.h>
#include <boost/function.hpp>
#include <boost/shared_ptr.hpp>
diff --git a/src/lib/datasrc/rrset_collection_base.cc b/src/lib/datasrc/rrset_collection_base.cc
index b19f62e..9da4075 100644
--- a/src/lib/datasrc/rrset_collection_base.cc
+++ b/src/lib/datasrc/rrset_collection_base.cc
@@ -14,6 +14,7 @@
#include <datasrc/rrset_collection_base.h>
#include <datasrc/zone_loader.h>
+#include <datasrc/zone_finder.h>
#include <exceptions/exceptions.h>
using namespace isc;
diff --git a/src/lib/datasrc/rrset_collection_base.h b/src/lib/datasrc/rrset_collection_base.h
index c02df9a..66b5825 100644
--- a/src/lib/datasrc/rrset_collection_base.h
+++ b/src/lib/datasrc/rrset_collection_base.h
@@ -22,14 +22,33 @@
namespace isc {
namespace datasrc {
-/// \brief A forward declaration
-class ZoneUpdater;
-
/// \brief datasrc derivation of \c isc::dns::RRsetCollectionBase.
///
-/// This is an abstract class that adds datasrc related detail to
-/// \c isc::dns::RRsetCollectionBase. Derived classes need to complete
-/// the implementation (add iterator support, etc.) before using it.
+/// This is a default datasrc implementation of
+/// \c isc::dns::RRsetCollectionBase that adds datasrc related detail.
+///
+/// While it is a concrete class to be used along with a \c ZoneUpdater,
+/// specific \c ZoneUpdater implementations may derive from it and add
+/// additional detail. Unless you are implementing a \c ZoneUpdater, you
+/// must not use the constructor directly. Instead use the
+/// \c ZoneUpdater::getRRsetCollection() method to get a reference to
+/// the \c RRsetCollectionBase object for that \c ZoneUpdater. This is
+/// usually a singleton object and the API is designed with this in
+/// mind, because multiple \c RRsetCollectionBase objects cannot be used
+/// at the same time in most kinds of database implementations
+/// (esp. where iterators are in use). Specific \c ZoneUpdaters that can
+/// allow multiple \c RRsetCollection objects may provide additional
+/// API, but that is unspecified here.
+///
+/// There are some restrictions on when an \c RRsetCollection may be
+/// used. Though code may have a reference to an \c RRsetCollection
+/// object, it is not always valid to use it. Implementations of
+/// \c ZoneUpdater may disable an \c RRsetCollection previously returned
+/// by \c ZoneUpdater::getRRsetCollection() after \c commit() is called
+/// on the \c ZoneUpdater. An \c isc::dns::RRsetCollectionError
+/// exception will be thrown if an \c RRsetCollection is used when
+/// disabled. Please see the \c ZoneUpdater methods' documentation for
+/// more detail.
class RRsetCollectionBase : public isc::dns::RRsetCollectionBase {
public:
/// \brief Constructor.
@@ -90,18 +109,18 @@ protected:
/// \brief See \c isc::dns::RRsetCollectionBase::getBeginning() for
/// documentation.
///
- /// \throw isc::dns::RRsetCollectionError if using the iterator
- /// results in some underlying datasrc error, or if \c disable() was
- /// called.
- virtual IterPtr getBeginning() = 0;
+ /// \throw isc::NotImplemented as it's not implemented currently.
+ virtual IterPtr getBeginning() {
+ isc_throw(NotImplemented, "This method is not implemented.");
+ }
/// \brief See \c isc::dns::RRsetCollectionBase::getEnd() for
/// documentation.
///
- /// \throw isc::dns::RRsetCollectionError if using the iterator
- /// results in some underlying datasrc error, or if \c disable() was
- /// called.
- virtual IterPtr getEnd() = 0;
+ /// \throw isc::NotImplemented as it's not implemented currently.
+ virtual IterPtr getEnd() {
+ isc_throw(NotImplemented, "This method is not implemented.");
+ }
private:
ZoneUpdater& updater_;
@@ -109,13 +128,6 @@ private:
bool disabled_;
};
-/// \brief A pointer-like type pointing to an
-/// \c isc::datasrc::RRsetCollectionBase object.
-///
-/// This type is used to handle RRsetCollections in a polymorphic manner
-/// in libdatasrc.
-typedef boost::shared_ptr<isc::datasrc::RRsetCollectionBase> RRsetCollectionPtr;
-
} // end of namespace datasrc
} // end of namespace isc
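To make the documented workflow concrete, here is a rough, hypothetical helper
(not part of this commit; the zone and owner names are made up) that follows
the description above: get the collection from a ZoneUpdater, use it, and stop
using it once commit() has been called, since further use would throw
isc::dns::RRsetCollectionError.

    #include <datasrc/client.h>
    #include <datasrc/zone.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    using namespace isc::datasrc;
    using namespace isc::dns;

    // Hypothetical sketch: look up one RRset via the updater's collection,
    // then commit.  The collection reference must not be used after commit().
    ConstRRsetPtr
    findBeforeCommit(DataSourceClient& client) {
        ZoneUpdaterPtr updater = client.getUpdater(Name("example.org"), false);
        isc::dns::RRsetCollectionBase& collection =
            updater->getRRsetCollection();
        const ConstRRsetPtr found =
            collection.find(Name("www.example.org"), RRClass::IN(),
                            RRType::A());
        updater->commit();
        // 'collection' must not be touched from here on.
        return (found);
    }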
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index bd71544..632c271 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -44,7 +44,7 @@ namespace {
// program may not be taking advantage of features (possibly performance
// improvements) added to the database.
const int SQLITE_SCHEMA_MAJOR_VERSION = 2;
-const int SQLITE_SCHEMA_MINOR_VERSION = 1;
+const int SQLITE_SCHEMA_MINOR_VERSION = 2;
}
namespace isc {
@@ -338,7 +338,7 @@ public:
const char* const SCHEMA_LIST[] = {
"CREATE TABLE schema_version (version INTEGER NOT NULL, "
"minor INTEGER NOT NULL DEFAULT 0)",
- "INSERT INTO schema_version VALUES (2, 1)",
+ "INSERT INTO schema_version VALUES (2, 2)",
"CREATE TABLE zones (id INTEGER PRIMARY KEY, "
"name TEXT NOT NULL COLLATE NOCASE, "
"rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN', "
@@ -358,6 +358,7 @@ const char* const SCHEMA_LIST[] = {
// defining a separate index for rdtype only doesn't work either; SQLite3
// would then create a temporary B-tree for "ORDER BY").
"CREATE INDEX records_bytype_and_rname ON records (rdtype, rname)",
+ "CREATE INDEX records_byrname_and_rdtype ON records (rname, rdtype)",
"CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
"hash TEXT NOT NULL COLLATE NOCASE, "
"owner TEXT NOT NULL COLLATE NOCASE, "
diff --git a/src/lib/datasrc/sqlite3_accessor_link.cc b/src/lib/datasrc/sqlite3_accessor_link.cc
index c064e0f..56e0c2f 100644
--- a/src/lib/datasrc/sqlite3_accessor_link.cc
+++ b/src/lib/datasrc/sqlite3_accessor_link.cc
@@ -47,12 +47,12 @@ checkConfig(ConstElementPtr config, ElementPtr errors) {
bool result = true;
if (!config || config->getType() != Element::map) {
- addError(errors, "Base config for SQlite3 backend must be a map");
+ addError(errors, "Base config for SQLite3 backend must be a map");
result = false;
} else {
if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
addError(errors,
- "Config for SQlite3 backend does not contain a '" +
+ "Config for SQLite3 backend does not contain a '" +
string(CONFIG_ITEM_DATABASE_FILE) +
"' value");
result = false;
@@ -89,11 +89,11 @@ createInstance(isc::data::ConstElementPtr config, std::string& error) {
new SQLite3Accessor(dbfile, "IN")); // XXX: avoid hardcode RR class
return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
} catch (const std::exception& exc) {
- error = std::string("Error creating sqlite3 datasource: ") +
+ error = std::string("Error creating SQLite3 datasource: ") +
exc.what();
return (NULL);
} catch (...) {
- error = std::string("Error creating sqlite3 datasource, "
+ error = std::string("Error creating SQLite3 datasource, "
"unknown exception");
return (NULL);
}
diff --git a/src/lib/datasrc/tests/client_list_unittest.cc b/src/lib/datasrc/tests/client_list_unittest.cc
index 29a0daa..0838fb6 100644
--- a/src/lib/datasrc/tests/client_list_unittest.cc
+++ b/src/lib/datasrc/tests/client_list_unittest.cc
@@ -14,7 +14,7 @@
#include <datasrc/client_list.h>
#include <datasrc/client.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/data_source.h>
#include <datasrc/memory/memory_client.h>
#include <datasrc/memory/zone_table_segment.h>
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 9f5ade4..58af193 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -25,8 +25,9 @@
#include <datasrc/database.h>
#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <datasrc/data_source.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/sqlite3_accessor.h>
#include <testutils/dnsmessage_test.h>
@@ -1728,58 +1729,107 @@ TYPED_TEST(DatabaseClientTest, updateAfterDeleteIterator) {
}
void
-doFindTest(ZoneFinder& finder,
- const isc::dns::Name& name,
- const isc::dns::RRType& type,
- const isc::dns::RRType& expected_type,
- const isc::dns::RRTTL expected_ttl,
- ZoneFinder::Result expected_result,
- const std::vector<std::string>& expected_rdatas,
- const std::vector<std::string>& expected_sig_rdatas,
- ZoneFinder::FindResultFlags expected_flags =
- ZoneFinder::RESULT_DEFAULT,
- const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
- const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+findTestCommon(ZoneFinder& finder, const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ ConstZoneFinderContextPtr actual_result,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<string>& expected_rdatas,
+ const std::vector<string>& expected_sig_rdatas,
+ ZoneFinder::FindResultFlags expected_flags,
+ const isc::dns::Name& expected_name,
+ const ZoneFinder::FindOptions options)
{
- SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
- ConstZoneFinderContextPtr result = finder.find(name, type, options);
- ASSERT_EQ(expected_result, result->code) << name << " " << type;
+ ASSERT_EQ(expected_result, actual_result->code) << name << " " << type;
EXPECT_EQ((expected_flags & ZoneFinder::RESULT_WILDCARD) != 0,
- result->isWildcard());
+ actual_result->isWildcard());
EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0,
- result->isNSECSigned());
+ actual_result->isNSECSigned());
EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC3_SIGNED) != 0,
- result->isNSEC3Signed());
- if (!expected_rdatas.empty() && result->rrset) {
- checkRRset(result->rrset, expected_name != Name(".") ? expected_name :
+ actual_result->isNSEC3Signed());
+ if (!expected_rdatas.empty() && actual_result->rrset) {
+ checkRRset(actual_result->rrset,
+ expected_name != Name::ROOT_NAME() ? expected_name :
name, finder.getClass(), expected_type, expected_ttl,
expected_rdatas);
if ((options & ZoneFinder::FIND_DNSSEC) == ZoneFinder::FIND_DNSSEC) {
- if (!expected_sig_rdatas.empty() && result->rrset->getRRsig()) {
- checkRRset(result->rrset->getRRsig(),
- expected_name != Name(".") ? expected_name : name,
+ if (!expected_sig_rdatas.empty() &&
+ actual_result->rrset->getRRsig()) {
+ checkRRset(actual_result->rrset->getRRsig(),
+ expected_name != Name::ROOT_NAME() ?
+ expected_name : name,
finder.getClass(),
isc::dns::RRType::RRSIG(), expected_ttl,
expected_sig_rdatas);
} else if (expected_sig_rdatas.empty()) {
- EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset->getRRsig()) <<
- "Unexpected RRSIG: " << result->rrset->getRRsig()->toText();
+ EXPECT_EQ(isc::dns::RRsetPtr(),
+ actual_result->rrset->getRRsig()) <<
+ "Unexpected RRSIG: " <<
+ actual_result->rrset->getRRsig()->toText();
} else {
ADD_FAILURE() << "Missing RRSIG";
}
- } else if (result->rrset->getRRsig()) {
- EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset->getRRsig()) <<
- "Unexpected RRSIG: " << result->rrset->getRRsig()->toText();
+ } else if (actual_result->rrset->getRRsig()) {
+ EXPECT_EQ(isc::dns::RRsetPtr(), actual_result->rrset->getRRsig())
+ << "Unexpected RRSIG: "
+ << actual_result->rrset->getRRsig()->toText();
}
} else if (expected_rdatas.empty()) {
- EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset) <<
- "Unexpected RRset: " << result->rrset->toText();
+ EXPECT_EQ(isc::dns::RRsetPtr(), actual_result->rrset) <<
+ "Unexpected RRset: " << actual_result->rrset->toText();
} else {
ADD_FAILURE() << "Missing result";
}
}
void
+doFindTest(ZoneFinder& finder,
+ const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ ZoneFinder::FindResultFlags expected_flags =
+ ZoneFinder::RESULT_DEFAULT,
+ const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+ ConstZoneFinderContextPtr result = finder.find(name, type, options);
+ findTestCommon(finder, name, type, result, expected_type, expected_ttl,
+ expected_result, expected_rdatas, expected_sig_rdatas,
+ expected_flags, expected_name, options);
+}
+
+void
+doFindAtOriginTest(ZoneFinder& finder,
+ const isc::dns::Name& origin,
+ const isc::dns::RRType& type,
+ const isc::dns::RRType& expected_type,
+ const isc::dns::RRTTL expected_ttl,
+ ZoneFinder::Result expected_result,
+ const std::vector<std::string>& expected_rdatas,
+ const std::vector<std::string>& expected_sig_rdatas,
+ bool use_minttl = false,
+ ZoneFinder::FindResultFlags expected_flags =
+ ZoneFinder::RESULT_DEFAULT,
+ const isc::dns::Name& expected_name =
+ isc::dns::Name::ROOT_NAME(),
+ const ZoneFinder::FindOptions options =
+ ZoneFinder::FIND_DEFAULT)
+{
+ SCOPED_TRACE("doFindAtOriginTest " + origin.toText() + " " + type.toText());
+ ConstZoneFinderContextPtr result =
+ finder.findAtOrigin(type, use_minttl, options);
+ findTestCommon(finder, origin, type, result, expected_type, expected_ttl,
+ expected_result, expected_rdatas, expected_sig_rdatas,
+ expected_flags, expected_name, options);
+}
+
+void
doFindAllTestResult(ZoneFinder& finder, const isc::dns::Name& name,
ZoneFinder::Result expected_result,
const isc::dns::RRType expected_type,
@@ -2116,6 +2166,159 @@ TYPED_TEST(DatabaseClientTest, find) {
this->expected_rdatas_, this->expected_sig_rdatas_);
}
+TYPED_TEST(DatabaseClientTest, findAtOrigin) {
+ ZoneFinderPtr finder(this->getFinder());
+
+ // Specified type of RR exists, no DNSSEC
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindAtOriginTest(*finder, this->zname_, RRType::NS(), RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Specified type of RR exists, with DNSSEC
+ this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindAtOriginTest(*finder, this->zname_, RRType::NS(), RRType::NS(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ false, ZoneFinder::RESULT_DEFAULT, this->zname_,
+ ZoneFinder::FIND_DNSSEC);
+
+ // Specified type of RR doesn't exist, no DNSSEC
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindAtOriginTest(*finder, this->zname_, RRType::TXT(), this->qtype_,
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // Specified type of RR doesn't exist, with DNSSEC
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back(
+ "acnamesig1.example.org. A NS RRSIG NSEC");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindAtOriginTest(*finder, this->zname_, RRType::TXT(), RRType::NSEC(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ false, ZoneFinder::RESULT_NSEC_SIGNED,
+ this->zname_, ZoneFinder::FIND_DNSSEC);
+
+ // Specified type of RR doesn't exist, with DNSSEC, enabling NSEC3
+ this->current_accessor_->enableNSEC3();
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ doFindAtOriginTest(*finder, this->zname_, RRType::TXT(), RRType::TXT(),
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ false, ZoneFinder::RESULT_NSEC3_SIGNED,
+ this->zname_, ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, findAtOriginWithMinTTL) {
+ // First, replace the SOA of the test zone so that its RR TTL is larger
+ // than MINTTL (the original data are used in many places, so replacing
+ // them just for this doesn't make sense).
+ RRsetPtr old_soa(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ old_soa->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+
+ const string new_soa_rdata = "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 1200";
+ RRsetPtr new_soa(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ new_soa->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ new_soa_rdata));
+
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*old_soa);
+ this->updater_->addRRset(*new_soa);
+ this->updater_->commit();
+
+ ZoneFinderPtr finder = this->getFinder();
+
+ // Specify the use of min TTL, then the resulting TTL should be derived
+ // from the SOA MINTTL (which is smaller).
+ this->expected_rdatas_.push_back(new_soa_rdata);
+ doFindAtOriginTest(*finder, this->zname_, RRType::SOA(), RRType::SOA(),
+ RRTTL(1200), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ true);
+
+ // If DNSSEC is requested, TTL of the RRSIG should also be the min.
+ this->expected_sig_rdatas_.push_back(
+ "SOA 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. FAKEFAKEFAKE");
+ doFindAtOriginTest(*finder, this->zname_, RRType::SOA(), RRType::SOA(),
+ RRTTL(1200), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ true, ZoneFinder::RESULT_DEFAULT, this->zname_,
+ ZoneFinder::FIND_DNSSEC);
+
+ // Not really the intended usage, but specify the use of min TTL for non-SOA.
+ // It should still work as specified.
+ this->expected_rdatas_.clear();
+ this->expected_sig_rdatas_.clear();
+ this->expected_rdatas_.push_back("ns.example.com.");
+ doFindAtOriginTest(*finder, this->zname_, RRType::NS(), RRType::NS(),
+ RRTTL(1200), ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ true);
+
+ // If we don't request the use of min TTL, the original TTL will be used.
+ this->expected_rdatas_.clear();
+ this->expected_rdatas_.push_back(new_soa_rdata);
+ doFindAtOriginTest(*finder, this->zname_, RRType::SOA(), RRType::SOA(),
+ this->rrttl_, ZoneFinder::SUCCESS,
+ this->expected_rdatas_, this->expected_sig_rdatas_);
+
+ // If no RRset is returned, use_minttl doesn't matter (it shouldn't cause
+ // disruption)
+ this->expected_rdatas_.clear();
+ doFindAtOriginTest(*finder, this->zname_, RRType::TXT(), this->qtype_,
+ this->rrttl_, ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ true);
+
+ // If it results in NXRRSET with NSEC, and if we specify the use of min
+ // TTL, the NSEC and RRSIG should have the min TTL (again, though, this
+ // use case is not really the intended one)
+ this->expected_rdatas_.push_back(
+ "acnamesig1.example.org. A NS RRSIG NSEC");
+ this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindAtOriginTest(*finder, this->zname_, RRType::TXT(), RRType::NSEC(),
+ RRTTL(1200), ZoneFinder::NXRRSET,
+ this->expected_rdatas_, this->expected_sig_rdatas_,
+ true, ZoneFinder::RESULT_NSEC_SIGNED,
+ this->zname_, ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, findAtOriginWithMinTTLBroken) {
+ // Similar to the previous case, but we intentionally remove the SOA
+ // (assuming the underlying data source doesn't complain about it).
+ // This will cause an exception in the subsequent findAtOrigin() call with
+ // use_minttl being true.
+ RRsetPtr old_soa(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+ this->rrttl_));
+ old_soa->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+ "ns1.example.org. admin.example.org. "
+ "1234 3600 1800 2419200 7200"));
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->updater_->deleteRRset(*old_soa);
+ this->updater_->commit();
+
+ EXPECT_THROW(this->getFinder()->findAtOrigin(RRType::NS(), true,
+ ZoneFinder::FIND_DEFAULT),
+ DataSourceError);
+}
+
TYPED_TEST(DatabaseClientTest, findOutOfZone) {
// If the query name is out-of-zone it should result in an exception
boost::shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
@@ -2263,7 +2466,7 @@ TYPED_TEST(DatabaseClientTest, findDelegation) {
// It should normally just result in DELEGATION; if GLUE_OK is specified,
// the other RR should be visible.
this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("ns.example.com");
+ this->expected_rdatas_.push_back("ns.example.com.");
doFindTest(*finder, Name("brokenns1.example.org"), this->qtype_,
RRType::NS(), this->rrttl_, ZoneFinder::DELEGATION,
this->expected_rdatas_, this->empty_rdatas_,
@@ -2312,7 +2515,7 @@ TYPED_TEST(DatabaseClientTest, findDS) {
// Some insane case: DS under a zone cut. It's included in the DB, but
// shouldn't be visible via finder.
this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("ns.example.com");
+ this->expected_rdatas_.push_back("ns.example.com.");
doFindTest(*finder, Name("child.insecdelegation.example.org"),
RRType::DS(), RRType::NS(), this->rrttl_,
ZoneFinder::DELEGATION, this->expected_rdatas_,
@@ -3446,7 +3649,7 @@ TYPED_TEST(DatabaseClientTest, deleteRRset) {
RRType::CNAME(), this->rrttl_));
this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
this->rrset_->getClass(),
- "www.example.org"));
+ "www.example.org."));
this->updater_->deleteRRset(*this->rrset_);
// The this->updater_ finder should immediately see the deleted results.
@@ -3498,7 +3701,7 @@ TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
RRType::CNAME(), this->rrttl_));
this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
this->rrset_->getClass(),
- "www.example.org"));
+ "www.example.org."));
this->updater_ = this->client_->getUpdater(this->zname_, false);
this->updater_->deleteRRset(*this->rrset_);
@@ -4244,7 +4447,7 @@ public:
{}
ZoneUpdaterPtr updater;
- isc::datasrc::RRsetCollectionBase& collection;
+ isc::dns::RRsetCollectionBase& collection;
};
TYPED_TEST(RRsetCollectionTest, find) {
@@ -4389,7 +4592,14 @@ TYPED_TEST(RRsetCollectionAndUpdaterTest, updateThrows) {
// Now setup a new updater and call getRRsetCollection() on it.
this->updater_.reset();
this->updater_ = this->client_->getUpdater(this->zname_, false);
- (void) this->updater_->getRRsetCollection();
+
+ // Just call getRRsetCollection() here. The test using .find() is
+ // unnecessary for the purpose of this test case, but we call it to
+ // use the result of getRRsetCollection() and to silence compiler
+ // warnings about ignoring the return value of getRRsetCollection().
+ EXPECT_FALSE(this->updater_->getRRsetCollection().
+ find(Name("www.example.org"), RRClass::IN(), RRType::MX()));
// addRRset() must throw isc::InvalidOperation here.
EXPECT_THROW(this->updater_->addRRset(*this->rrset_),
@@ -4407,7 +4617,12 @@ TYPED_TEST(RRsetCollectionAndUpdaterTest, updateThrows) {
this->updater_.reset();
this->updater_ = this->client_->getUpdater(this->zname_, false);
this->updater_->addRRset(*this->rrset_);
- (void) this->updater_->getRRsetCollection();
+
+ // Just call getRRsetCollection() here. The .find() is unnecessary,
+ // but we have it to use the result of getRRsetCollection().
+ this->updater_->getRRsetCollection().find(Name("www.example.org"),
+ RRClass::IN(),
+ RRType::MX());
// deleteRRset() must throw isc::InvalidOperation here.
EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
@@ -4417,7 +4632,7 @@ TYPED_TEST(RRsetCollectionAndUpdaterTest, updateThrows) {
// Test that using an RRsetCollection after calling commit() on the
// ZoneUpdater throws, as the RRsetCollection is disabled.
TYPED_TEST(RRsetCollectionAndUpdaterTest, useAfterCommitThrows) {
- isc::datasrc::RRsetCollectionBase& collection =
+ isc::dns::RRsetCollectionBase& collection =
this->updater_->getRRsetCollection();
// find() must not throw here.
diff --git a/src/lib/datasrc/tests/faked_nsec3.h b/src/lib/datasrc/tests/faked_nsec3.h
index 26a7b8d..13dd5fb 100644
--- a/src/lib/datasrc/tests/faked_nsec3.h
+++ b/src/lib/datasrc/tests/faked_nsec3.h
@@ -15,7 +15,7 @@
#ifndef FAKED_NSEC3_H
#define FAKED_NSEC3_H
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <dns/nsec3hash.h>
diff --git a/src/lib/datasrc/tests/master_loader_callbacks_test.cc b/src/lib/datasrc/tests/master_loader_callbacks_test.cc
index dc44461..fb4487a 100644
--- a/src/lib/datasrc/tests/master_loader_callbacks_test.cc
+++ b/src/lib/datasrc/tests/master_loader_callbacks_test.cc
@@ -65,7 +65,7 @@ public:
virtual ZoneFinder& getFinder() {
isc_throw(isc::NotImplemented, "Not to be called in this test");
}
- virtual isc::datasrc::RRsetCollectionBase& getRRsetCollection() {
+ virtual isc::dns::RRsetCollectionBase& getRRsetCollection() {
isc_throw(isc::NotImplemented, "Not to be called in this test");
}
virtual void deleteRRset(const isc::dns::AbstractRRset&) {
diff --git a/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc b/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
index a45c2bd..b23e07f 100644
--- a/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
+++ b/src/lib/datasrc/tests/memory/rdata_serialization_unittest.cc
@@ -34,7 +34,9 @@
#include <boost/foreach.hpp>
#include <cstring>
+#include <algorithm>
#include <set>
+#include <stdexcept>
#include <string>
#include <vector>
@@ -131,7 +133,7 @@ protected:
"20120715220826 12345 com. FAKE"))
{}
- // A wraper for RdataEncoder::encode() with buffer overrun check.
+ // A wrapper for RdataEncoder::encode() with buffer overrun check.
void encodeWrapper(size_t data_len);
// Some commonly used RDATA
@@ -159,14 +161,28 @@ public:
// in the wire format. It then compares the wire data with the one
// generated by the normal libdns++ interface to see the encoding/decoding
// works as intended.
+ // By default it encodes the given RDATAs from scratch; if old_data
+ // is non-NULL, the test case assumes it points to previously encoded data
+ // and the given RDATAs are to be merged with it. old_rdata/rrsig_count
+ // must then be set to the numbers of RDATAs and RRSIGs encoded in old_data.
+ // These "count" variables must not be set to non-zero unless old_data is
+ // non-NULL, but that's not checked in this method; it's the caller's
+ // responsibility to ensure it. rdata_list and rrsig_list should contain
+ // all RDATAs and RRSIGs, including those stored in old_data.
void checkEncode(RRClass rrclass, RRType rrtype,
const vector<ConstRdataPtr>& rdata_list,
size_t expected_varlen_fields,
const vector<ConstRdataPtr>& rrsig_list =
- vector<ConstRdataPtr>());
+ vector<ConstRdataPtr>(),
+ const void* old_data = NULL,
+ size_t old_rdata_count = 0,
+ size_t old_rrsig_count = 0);
void addRdataCommon(const vector<ConstRdataPtr>& rrsigs);
- void addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs);
+ void addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs,
+ bool duplicate = false);
+ void mergeRdataCommon(const vector<ConstRdataPtr>& old_rrsigs,
+ const vector<ConstRdataPtr>& rrsigs);
};
// Used across more classes and scopes. But it's just uninteresting
@@ -272,11 +288,12 @@ public:
size_t rdata_count,
size_t rrsig_count,
size_t expected_varlen_fields,
- // Warning: this test actualy might change the
- // encoded_data !
- vector<uint8_t>& encoded_data, size_t,
+ const vector<uint8_t>& encoded_data_orig, size_t,
MessageRenderer& renderer)
{
+ // Make a manual copy, which we're going to modify.
+ vector<uint8_t> encoded_data = encoded_data_orig;
+
// If this type of RDATA is expected to contain variable-length fields,
// we brute force the encoded data, exploiting our knowledge of actual
// encoding, then adjust the encoded data excluding the list of length
@@ -532,13 +549,20 @@ RdataSerializationTest::encodeWrapper(size_t data_len) {
encoded_data_.resize(data_len);
}
+bool
+rdataMatch(ConstRdataPtr rdata1, ConstRdataPtr rdata2) {
+ return (rdata1->compare(*rdata2) == 0);
+}
+
template<class DecoderStyle>
void
RdataEncodeDecodeTest<DecoderStyle>::
checkEncode(RRClass rrclass, RRType rrtype,
const vector<ConstRdataPtr>& rdata_list,
size_t expected_varlen_fields,
- const vector<ConstRdataPtr>& rrsig_list)
+ const vector<ConstRdataPtr>& rrsig_list,
+ const void* old_data, size_t old_rdata_count,
+ size_t old_rrsig_count)
{
// These two names will be rendered before and after the test RDATA,
// to check in case the RDATA contain a domain name whether it's
@@ -552,34 +576,70 @@ checkEncode(RRClass rrclass, RRType rrtype,
actual_renderer_.clear();
encoded_data_.clear();
- // Build expected wire-format data
+ // Build expected wire-format data, skipping duplicate Rdata.
expected_renderer_.writeName(dummy_name);
+ vector<ConstRdataPtr> rdata_uniq_list;
BOOST_FOREACH(const ConstRdataPtr& rdata, rdata_list) {
- rdata->toWire(expected_renderer_);
+ if (std::find_if(rdata_uniq_list.begin(), rdata_uniq_list.end(),
+ boost::bind(rdataMatch, rdata, _1)) ==
+ rdata_uniq_list.end()) {
+ rdata_uniq_list.push_back(rdata);
+ rdata->toWire(expected_renderer_);
+ }
}
expected_renderer_.writeName(dummyName2());
+ vector<ConstRdataPtr> rrsig_uniq_list;
BOOST_FOREACH(const ConstRdataPtr& rdata, rrsig_list) {
- rdata->toWire(expected_renderer_);
+ if (std::find_if(rrsig_uniq_list.begin(), rrsig_uniq_list.end(),
+ boost::bind(rdataMatch, rdata, _1)) ==
+ rrsig_uniq_list.end()) {
+ rrsig_uniq_list.push_back(rdata);
+ rdata->toWire(expected_renderer_);
+ }
}
// Then build wire format data using the encoded data.
// 1st dummy name
actual_renderer_.writeName(dummy_name);
- // Create encoded data
- encoder_.start(rrclass, rrtype);
+ // Create encoded data. If old_xxx_count > 0, that part should be in
+ // old_data, so it should be excluded from addRdata/addSIGRdata.
+ if (old_data) {
+ encoder_.start(rrclass, rrtype, old_data, old_rdata_count,
+ old_rrsig_count);
+ } else {
+ encoder_.start(rrclass, rrtype);
+ }
+ size_t count = 0;
+ std::vector<ConstRdataPtr> encoded; // for duplicate check, including old
BOOST_FOREACH(const ConstRdataPtr& rdata, rdata_list) {
- encoder_.addRdata(*rdata);
+ if (++count > old_rdata_count) {
+ const bool uniq =
+ (std::find_if(encoded.begin(), encoded.end(),
+ boost::bind(rdataMatch, rdata, _1)) ==
+ encoded.end());
+ EXPECT_EQ(uniq, encoder_.addRdata(*rdata));
+ }
+ encoded.push_back(rdata); // we need to remember old rdata too
}
+ count = 0;
+ encoded.clear();
BOOST_FOREACH(const ConstRdataPtr& rdata, rrsig_list) {
- encoder_.addSIGRdata(*rdata);
+ if (++count > old_rrsig_count) {
+ const bool uniq =
+ (std::find_if(encoded.begin(), encoded.end(),
+ boost::bind(rdataMatch, rdata, _1)) ==
+ encoded.end());
+ EXPECT_EQ(uniq, encoder_.addSIGRdata(*rdata));
+ }
+ encoded.push_back(rdata);
}
const size_t storage_len = encoder_.getStorageLength();
encodeWrapper(storage_len);
- DecoderStyle::decode(rrclass, rrtype, rdata_list.size(), rrsig_list.size(),
- expected_varlen_fields, encoded_data_, storage_len,
- actual_renderer_);
+ DecoderStyle::decode(rrclass, rrtype, rdata_uniq_list.size(),
+ rrsig_uniq_list.size(), expected_varlen_fields,
+ encoded_data_, storage_len, actual_renderer_);
// Two sets of wire-format data should be identical.
matchWireData(expected_renderer_.getData(), expected_renderer_.getLength(),
@@ -619,7 +679,7 @@ TYPED_TEST(RdataEncodeDecodeTest, addRdata) {
template<class DecoderStyle>
void
RdataEncodeDecodeTest<DecoderStyle>::
-addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs) {
+addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs, bool duplicate) {
// Similar to addRdata(), but test with multiple RDATAs.
// Four different cases are tested: a single fixed-len RDATA (A),
// fixed-len data + domain name (MX), variable-len data only (TXT),
@@ -629,12 +689,19 @@ addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs) {
rdata_list_.clear();
rdata_list_.push_back(a_rdata_);
rdata_list_.push_back(a_rdata2);
+ if (duplicate) { // if duplicate is true, add duplicate Rdata
+ rdata_list_.push_back(a_rdata_);
+ }
checkEncode(RRClass::IN(), RRType::A(), rdata_list_, 0, rrsigs);
ConstRdataPtr mx_rdata1 = createRdata(RRType::MX(), RRClass::IN(),
- "5 mx1.example.com");
+ "5 mx1.example.com.");
ConstRdataPtr mx_rdata2 = createRdata(RRType::MX(), RRClass::IN(),
- "10 mx2.example.com");
+ "10 mx2.example.com.");
rdata_list_.clear();
rdata_list_.push_back(mx_rdata1);
rdata_list_.push_back(mx_rdata2);
+ if (duplicate) { // check duplicate detection is case-insensitive for names
+ rdata_list_.push_back(createRdata(RRType::MX(), RRClass::IN(),
+ "5 MX1.example.COM."));
+ }
@@ -644,6 +711,9 @@ addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs) {
"foo bar baz");
ConstRdataPtr txt_rdata2 = createRdata(RRType::TXT(), RRClass::IN(),
"another text data");
+ if (duplicate) {
+ rdata_list_.push_back(txt_rdata1);
+ }
rdata_list_.clear();
rdata_list_.push_back(txt_rdata1);
rdata_list_.push_back(txt_rdata2);
@@ -655,6 +725,9 @@ addRdataMultiCommon(const vector<ConstRdataPtr>& rrsigs) {
ConstRdataPtr naptr_rdata2 =
createRdata(RRType::NAPTR(), RRClass::IN(),
"200 100 \"s\" \"http\" \"\" _http._tcp.example.com");
+ if (duplicate) {
+ rdata_list_.push_back(naptr_rdata1);
+ }
rdata_list_.clear();
rdata_list_.push_back(naptr_rdata1);
rdata_list_.push_back(naptr_rdata2);
@@ -705,12 +778,18 @@ TYPED_TEST(RdataEncodeDecodeTest, addRdataMulti) {
vector<ConstRdataPtr> rrsigs;
this->addRdataMultiCommon(rrsigs); // test without RRSIGs (empty vector)
+ this->addRdataMultiCommon(rrsigs, true); // ditto, but with duplicated data
+
// Tests with two RRSIGs
rrsigs.push_back(this->rrsig_rdata_);
rrsigs.push_back(createRdata(RRType::RRSIG(), RRClass::IN(),
"A 5 2 3600 20120814220826 "
"20120715220826 54321 com. FAKE"));
this->addRdataMultiCommon(rrsigs);
+
+ // Similar to the previous, but with duplicate RRSIG.
+ rrsigs.push_back(this->rrsig_rdata_);
+ this->addRdataMultiCommon(rrsigs, true);
}
TEST_F(RdataSerializationTest, badAddRdata) {
@@ -732,50 +811,44 @@ TEST_F(RdataSerializationTest, badAddRdata) {
EXPECT_THROW(encoder_.encode(&encoded_data_[0], buf_len - 1),
isc::BadValue);
- // Type of RDATA and the specified RR type don't match. addRdata() should
- // detect this inconsistency.
+ // Some of the following checks confirm that adding an Rdata of the
+ // wrong RR type will be rejected. Several different cases are checked,
+ // but there shouldn't be any essential difference among these cases in
+ // the tested code; these cases were chosen because an older version of
+ // the implementation rejected them for possibly different reasons, and
+ // we simply keep them as they are not so many (and may help detect
+ // possible future regressions).
encoder_.start(RRClass::IN(), RRType::AAAA());
- EXPECT_THROW(encoder_.addRdata(*a_rdata_), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*a_rdata_), std::bad_cast);
- // Likewise.
encoder_.start(RRClass::IN(), RRType::A());
- EXPECT_THROW(encoder_.addRdata(*aaaa_rdata_), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*aaaa_rdata_), std::bad_cast);
- // Likewise. The encoder expects the first name completes the data, and
- // throws on the second due as an unexpected name field.
const ConstRdataPtr rp_rdata =
createRdata(RRType::RP(), RRClass::IN(), "a.example. b.example");
encoder_.start(RRClass::IN(), RRType::NS());
- EXPECT_THROW(encoder_.addRdata(*rp_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*rp_rdata), std::bad_cast);
- // Likewise. The encoder considers the name data a variable length data
- // field, and throws on the first name.
encoder_.start(RRClass::IN(), RRType::DHCID());
- EXPECT_THROW(encoder_.addRdata(*rp_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*rp_rdata), std::bad_cast);
- // Likewise. The text RDATA (2 bytes) will be treated as MX preference,
- // and the encoder will still expect to see a domain name.
const ConstRdataPtr txt_rdata = createRdata(RRType::TXT(), RRClass::IN(),
"a");
encoder_.start(RRClass::IN(), RRType::MX());
- EXPECT_THROW(encoder_.addRdata(*txt_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*txt_rdata), std::bad_cast);
- // Similar to the previous one, but in this case there's no data field
- // in the spec.
encoder_.start(RRClass::IN(), RRType::NS());
- EXPECT_THROW(encoder_.addRdata(*txt_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*txt_rdata), std::bad_cast);
- // Likewise. Inconsistent name compression policy.
const ConstRdataPtr ns_rdata =
- createRdata(RRType::NS(), RRClass::IN(), "ns.example");
+ createRdata(RRType::NS(), RRClass::IN(), "ns.example.");
encoder_.start(RRClass::IN(), RRType::DNAME());
- EXPECT_THROW(encoder_.addRdata(*ns_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*ns_rdata), std::bad_cast);
- // Same as the previous one, opposite inconsistency.
const ConstRdataPtr dname_rdata =
- createRdata(RRType::DNAME(), RRClass::IN(), "dname.example");
+ createRdata(RRType::DNAME(), RRClass::IN(), "dname.example.");
encoder_.start(RRClass::IN(), RRType::NS());
- EXPECT_THROW(encoder_.addRdata(*dname_rdata), isc::BadValue);
+ EXPECT_THROW(encoder_.addRdata(*dname_rdata), std::bad_cast);
// RDATA len exceeds the 16-bit range. Technically not invalid, but
// we don't support that (and it's practically useless anyway).
@@ -791,6 +864,145 @@ TEST_F(RdataSerializationTest, badAddRdata) {
isc::BadValue);
}
+struct MergeTestData {
+ const char* const type_txt; // "AAAA", "NS", etc
+ const char* const rdata_txt1; // RDATA possibly used for old data
+ const char* const rdata_txt2; // RDATA possibly used for new data
+ const char* const rdata_txt3; // optional data to check with multi-RDATAs
+ const size_t varlen_fields; // number of variable-len fields in RDATA
+} merge_test_data[] = {
+ // For test with fixed-length RDATA
+ {"A", "192.0.2.53", "192.0.2.54", "192.0.2.55", 0},
+ // For test with variable-length RDATA
+ {"TXT", "foo bar baz", "another text data", "yet another", 1},
+ // For test with RDATA containing domain name
+ {"MX", "5 mx1.example.com.", "10 mx2.example.com.", "20 mx.example.", 0},
+ {NULL, NULL, NULL, NULL, 0}
+};
+
+// Identifiers for slightly different modes of the "merge data" test below.
+// We test various combinations of the numbers of old (before merge) and new
+// (being merged) RDATAs.
+enum MergeTestMode {
+ ONE_OLD_ONE_NEW = 0,
+ MULTI_OLD_NO_NEW,
+ ONE_OLD_MULTI_NEW,
+ DUPLICATE_NEW, // The new RDATA is a duplicate of the old one
+ NO_OLD_ONE_NEW, // no old RDATA; this can also cover the case where
+ // the resulting RdataSet is RRSIG-only.
+ ONE_OLD_NO_NEW
+};
+
+// A helper to build vectors of Rdata for the given test mode.
+void
+createMergeData(int mode, const MergeTestData& data,
+ const RRClass& rrclass, const RRType& rrtype,
+ vector<ConstRdataPtr>& old_list,
+ vector<ConstRdataPtr>& new_list)
+{
+ old_list.clear();
+ new_list.clear();
+ old_list.push_back(createRdata(rrtype, rrclass, data.rdata_txt1));
+ new_list.push_back(createRdata(rrtype, rrclass, data.rdata_txt2));
+ switch (static_cast<MergeTestMode>(mode)) {
+ case ONE_OLD_ONE_NEW:
+ break;
+ case MULTI_OLD_NO_NEW:
+ old_list.push_back(createRdata(rrtype, rrclass, data.rdata_txt3));
+ break;
+ case ONE_OLD_MULTI_NEW:
+ new_list.push_back(createRdata(rrtype, rrclass, data.rdata_txt3));
+ break;
+ case DUPLICATE_NEW:
+ new_list.push_back(createRdata(rrtype, rrclass, data.rdata_txt1));
+ break;
+ case NO_OLD_ONE_NEW:
+ old_list.clear();
+ break;
+ case ONE_OLD_NO_NEW:
+ new_list.clear();
+ break;
+ }
+}
+
+template<class DecoderStyle>
+void
+RdataEncodeDecodeTest<DecoderStyle>::
+mergeRdataCommon(const vector<ConstRdataPtr>& old_rrsigs,
+ const vector<ConstRdataPtr>& rrsigs)
+{
+ const RRClass rrclass(RRClass::IN()); // class is fixed in the test
+ vector<uint8_t> old_encoded_data;
+ vector<ConstRdataPtr> rrsigs_all;
+ vector<ConstRdataPtr> old_list;
+ vector<ConstRdataPtr> new_list;
+
+ // For each type of test Rdata, we check all modes of test scenarios.
+ for (const MergeTestData* data = merge_test_data;
+ data->type_txt;
+ ++data) {
+ const RRType rrtype(data->type_txt);
+
+ for (int mode = 0; mode <= ONE_OLD_NO_NEW; ++mode) {
+ createMergeData(mode, *data, rrclass, rrtype, old_list, new_list);
+
+ // Encode the old data
+ rdata_list_ = old_list;
+ checkEncode(rrclass, RRType(data->type_txt), rdata_list_,
+ data->varlen_fields, old_rrsigs);
+ old_encoded_data = encoded_data_; // make a copy of the data
+
+ // Prepare new data. rrsigs_all is set to "old_rrsigs + rrsigs".
+ // Then check the behavior in the "merge" mode.
+ const size_t old_rdata_count = rdata_list_.size();
+ rdata_list_.insert(rdata_list_.end(), new_list.begin(),
+ new_list.end());
+ rrsigs_all = old_rrsigs;
+ rrsigs_all.insert(rrsigs_all.end(), rrsigs.begin(), rrsigs.end());
+ checkEncode(rrclass, rrtype, rdata_list_, data->varlen_fields,
+ rrsigs_all, &old_encoded_data[0], old_rdata_count,
+ old_rrsigs.size());
+ }
+ }
+}
+
+TYPED_TEST(RdataEncodeDecodeTest, mergeRdata) {
+ vector<ConstRdataPtr> old_rrsigs;
+ vector<ConstRdataPtr> rrsigs;
+
+ // Test without RRSIGs, either for old or new.
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+
+ // Test without RRSIG for old and with RRSIG for new.
+ rrsigs.push_back(this->rrsig_rdata_);
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+
+ // Test with RRSIG for old and without RRSIG for new.
+ rrsigs.clear();
+ old_rrsigs.push_back(this->rrsig_rdata_);
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+
+ // Tests with RRSIGs for both old and new.
+ old_rrsigs.clear();
+ rrsigs.push_back(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "A 5 2 3600 20120814220826 "
+ "20120715220826 54321 com. FAKE"));
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+
+ // Tests with multiple old RRSIGs.
+ rrsigs.clear();
+ old_rrsigs.clear();
+ old_rrsigs.push_back(this->rrsig_rdata_);
+ old_rrsigs.push_back(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "A 5 2 3600 20120814220826 "
+ "20120715220826 54321 com. FAKE"));
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+
+ // Tests with duplicate RRSIG in new one (keeping the old_rrsigs)
+ rrsigs.push_back(this->rrsig_rdata_);
+ this->mergeRdataCommon(old_rrsigs, rrsigs);
+}
+
void
checkSigData(const ConstRdataPtr& decoded, bool* called, const void* encoded,
size_t length)
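Condensing the merge-mode interface these tests exercise: RdataEncoder::start()
now also accepts previously encoded data together with the old RDATA/RRSIG
counts, and addRdata()/addSIGRdata() return false when the given Rdata
duplicates one that is already present. A hypothetical helper sketch (not in
this commit) built from only those calls:

    #include <datasrc/memory/rdata_serialization.h>
    #include <dns/rdata.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>

    #include <stdint.h>
    #include <vector>

    using namespace isc::datasrc::memory;

    // Hypothetical: re-encode previously encoded IN/A data plus one new
    // Rdata.  If 'new_rdata' duplicates an existing one, addRdata() returns
    // false and the output is equivalent to the old data.
    std::vector<uint8_t>
    mergeOneRdata(RdataEncoder& encoder, const void* old_data,
                  size_t old_rdata_count, size_t old_sig_count,
                  const isc::dns::rdata::Rdata& new_rdata) {
        encoder.start(isc::dns::RRClass::IN(), isc::dns::RRType::A(),
                      old_data, old_rdata_count, old_sig_count);
        encoder.addRdata(new_rdata); // returns false on duplicate
        std::vector<uint8_t> buf(encoder.getStorageLength());
        encoder.encode(&buf[0], buf.size());
        return (buf);
    }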
diff --git a/src/lib/datasrc/tests/memory/rdataset_unittest.cc b/src/lib/datasrc/tests/memory/rdataset_unittest.cc
index f599999..1a964ef 100644
--- a/src/lib/datasrc/tests/memory/rdataset_unittest.cc
+++ b/src/lib/datasrc/tests/memory/rdataset_unittest.cc
@@ -32,14 +32,18 @@
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
#include <boost/lexical_cast.hpp>
+#include <vector>
#include <string>
using namespace isc::dns;
using namespace isc::dns::rdata;
using namespace isc::datasrc::memory;
using namespace isc::testutils;
+using std::string;
+using std::vector;
using isc::datasrc::memory::detail::SegmentObjectHolder;
using boost::lexical_cast;
@@ -48,20 +52,37 @@ namespace {
class RdataSetTest : public ::testing::Test {
protected:
RdataSetTest() :
+ rrclass(RRClass::IN()),
// 1076895760 = 0x40302010. Use this so we fill in all 8-bit "field"
// of the 32-bit TTL
a_rrset_(textToRRset("www.example.com. 1076895760 IN A 192.0.2.1")),
rrsig_rrset_(textToRRset("www.example.com. 1076895760 IN RRSIG "
"A 5 2 3600 20120814220826 20120715220826 "
"1234 example.com. FAKE"))
- {}
+ {
+ def_rdata_txt_.push_back("192.0.2.1");
+ def_rrsig_txt_.push_back("A 5 2 3600 20120814220826 20120715220826 "
+ "1234 example.com. FAKE");
+ }
void TearDown() {
EXPECT_TRUE(mem_sgmt_.allMemoryDeallocated());
}
+ // Helper for checking common cases against both versions of create()
+ typedef boost::function<RdataSet*(isc::util::MemorySegment&, RdataEncoder&,
+ ConstRRsetPtr, ConstRRsetPtr)> CreateFn;
+ void checkCreateManyRRs(CreateFn create_fn, size_t n_old_rdata);
+ void checkCreateManyRRSIGs(CreateFn create_fn, size_t n_old_sig);
+ void checkBadCreate(CreateFn create_fn);
+
+ const RRClass rrclass;
ConstRRsetPtr a_rrset_, rrsig_rrset_;
isc::util::MemorySegmentLocal mem_sgmt_;
RdataEncoder encoder_;
+
+ // These are placeholders for default expected values used in checkRdataSet.
+ vector<string> def_rdata_txt_;
+ vector<string> def_rrsig_txt_;
};
// Convert the given 32-bit integer (network byte order) to the corresponding
@@ -73,38 +94,60 @@ restoreTTL(const void* ttl_data) {
}
// A helper callback for checkRdataSet. This confirms the given data
-// is the expected in::A RDATA (the value is taken from the RdataSetTest
-// constructor).
+// is the expected RDATA of the specified type.
void
-checkData(const void* data, size_t size) {
+checkData(const void* data, size_t size, const RRType* rrtype,
+ vector<string>::const_iterator* it,
+ vector<string>::const_iterator it_end)
+{
+ ASSERT_TRUE(*it != it_end); // shouldn't reach the end yet
+
isc::util::InputBuffer b(data, size);
- EXPECT_EQ(0, in::A(b, size).compare(in::A("192.0.2.1")));
+ EXPECT_EQ(0, createRdata(*rrtype, RRClass::IN(), b, size)->compare(
+ *createRdata(*rrtype, RRClass::IN(), **it)));
+ ++(*it); // move to the next expected data
}
// This is a set of checks for an RdataSet created with some simple
-// conditions. with_rrset/with_rrsig is true iff the RdataSet is supposed to
-// contain normal/RRSIG RDATA.
+// conditions. expected_data/sigs contain the RDATAs and RRSIGs that are
+// supposed to be contained in rdataset. Either can be empty if rdataset
+// lacks the corresponding RDATAs or RRSIGs (but not both).
void
-checkRdataSet(const RdataSet& rdataset, bool with_rrset, bool with_rrsig) {
+checkRdataSet(const RdataSet& rdataset,
+ vector<string> expected_data, // we use a local copy
+ const vector<string>& expected_sigs)
+{
EXPECT_FALSE(rdataset.next); // by default the next pointer should be NULL
EXPECT_EQ(RRType::A(), rdataset.type);
// See the RdataSetTest constructor for the magic number.
EXPECT_EQ(RRTTL(1076895760), restoreTTL(rdataset.getTTLData()));
- EXPECT_EQ(with_rrset ? 1 : 0, rdataset.getRdataCount());
- EXPECT_EQ(with_rrsig ? 1 : 0, rdataset.getSigRdataCount());
+ EXPECT_EQ(expected_data.size(), rdataset.getRdataCount());
+ EXPECT_EQ(expected_sigs.size(), rdataset.getSigRdataCount());
+
+ // extend expected_data with sigs for the convenience of RdataReader
+ expected_data.insert(expected_data.end(), expected_sigs.begin(),
+ expected_sigs.end());
- // A simple test for the data content. Details tests for the encoder/
+ // A simple test for the data content. Detailed tests for the encoder/
// reader should be basically sufficient for various cases of the data,
// and the fact that this test doesn't detect memory leak should be
// reasonably sufficient that the implementation handles the data region
- // correctly. Here we check one simple case for a simple form of RDATA,
- // mainly for checking the behavior of getDataBuf().
+ // correctly. Here we check one simple case for a simple form of RDATA
+ // and RRSIG, mainly for checking the behavior of getDataBuf().
+ vector<string>::const_iterator it = expected_data.begin();
+ RRType rrtype = RRType::A();
RdataReader reader(RRClass::IN(), RRType::A(),
reinterpret_cast<const uint8_t*>(
rdataset.getDataBuf()),
rdataset.getRdataCount(), rdataset.getSigRdataCount(),
- &RdataReader::emptyNameAction, checkData);
+ &RdataReader::emptyNameAction,
+ boost::bind(checkData, _1, _2, &rrtype, &it,
+ expected_data.end()));
reader.iterate();
+ rrtype = RRType::RRSIG();
+ reader.iterateAllSigs();
+
+ EXPECT_TRUE(it == expected_data.end());
}
TEST_F(RdataSetTest, create) {
@@ -113,10 +156,104 @@ TEST_F(RdataSetTest, create) {
// would detect any memory leak)
RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
ConstRRsetPtr());
- checkRdataSet(*rdataset, true, false);
+ checkRdataSet(*rdataset, def_rdata_txt_, vector<string>());
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
}
+// This is similar to the simple create test, but we check all combinations
+// of old and new data.
+TEST_F(RdataSetTest, mergeCreate) {
+ // Prepare test data
+ const char* const a_rdatas[] = { "192.0.2.1", "192.0.2.2" };
+ const char* const sig_rdatas[] = {
+ "A 5 2 3600 20120814220826 20120715220826 1234 example.com. FAKE",
+ "A 5 2 3600 20120814220826 20120715220826 4321 example.com. FAKE" };
+ vector<ConstRRsetPtr> a_rrsets;
+ a_rrsets.push_back(textToRRset("www.example.com. 1076895760 IN A "
+ + string(a_rdatas[0])));
+ a_rrsets.push_back(textToRRset("www.example.com. 1076895760 IN A "
+ + string(a_rdatas[1])));
+ vector<ConstRRsetPtr> rrsig_rrsets;
+ rrsig_rrsets.push_back(textToRRset("www.example.com. 1076895760 IN RRSIG "
+ + string(sig_rdatas[0])));
+ rrsig_rrsets.push_back(textToRRset("www.example.com. 1076895760 IN RRSIG "
+ + string(sig_rdatas[1])));
+ ConstRRsetPtr null_rrset; // convenience shortcut
+
+ // We are going to check all combinations of with/without old and new
+ // RDATA/RRSIGs. Counter variables i and j control the old and new data,
+ // respectively; the meaning of the value is: bit 1: with RDATA, bit 2:
+ // with RRSIG.
+ // Note that at least one RDATA or RRSIG should be contained, so there's
+ // no case for value 0.
+ for (int i = 1; i < 4; ++i) {
+ for (int j = 1; j < 4; ++j) {
+ SCOPED_TRACE("creating merge case " + lexical_cast<string>(i) +
+ ", " + lexical_cast<string>(j));
+ // Create old rdataset
+ SegmentObjectHolder<RdataSet, RRClass> holder1(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_,
+ (i & 1) != 0 ? a_rrsets[0] : null_rrset,
+ (i & 2) != 0 ? rrsig_rrsets[0] : null_rrset),
+ rrclass);
+ // Create merged rdataset, based on the old one and RRsets
+ SegmentObjectHolder<RdataSet, RRClass> holder2(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_,
+ (j & 1) != 0 ? a_rrsets[1] : null_rrset,
+ (j & 2) != 0 ? rrsig_rrsets[1] : null_rrset,
+ holder1.get()),
+ rrclass);
+
+ // Set up the expected data for the case.
+ vector<string> expected_rdata;
+ if ((i & 1) != 0) {
+ expected_rdata.push_back(a_rdatas[0]);
+ }
+ if ((j & 1) != 0) {
+ expected_rdata.push_back(a_rdatas[1]);
+ }
+ vector<string> expected_sigs;
+ if ((i & 2) != 0) {
+ expected_sigs.push_back(sig_rdatas[0]);
+ }
+ if ((j & 2) != 0) {
+ expected_sigs.push_back(sig_rdatas[1]);
+ }
+
+ // Then perform the check
+ checkRdataSet(*holder2.get(), expected_rdata, expected_sigs);
+ }
+ }
+}
+
+TEST_F(RdataSetTest, duplicate) {
+ // Create RRset and RRSIG containing duplicate RDATA.
+ ConstRRsetPtr dup_rrset =
+ textToRRset("www.example.com. 1076895760 IN A 192.0.2.1\n"
+ "www.example.com. 1076895760 IN A 192.0.2.1\n");
+ ConstRRsetPtr dup_rrsig =
+ textToRRset("www.example.com. 1076895760 IN RRSIG " +
+ def_rrsig_txt_[0] +
+ "\nwww.example.com. 1076895760 IN RRSIG " +
+ def_rrsig_txt_[0]);
+
+ // After suppressing duplicates, it should be the same as the default
+ // RdataSet. Check that.
+ SegmentObjectHolder<RdataSet, RRClass> holder1(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_, dup_rrset, dup_rrsig), rrclass);
+ checkRdataSet(*holder1.get(), def_rdata_txt_, def_rrsig_txt_);
+
+ // Confirm the same thing for the merge mode.
+ SegmentObjectHolder<RdataSet, RRClass> holder2(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_, a_rrset_, rrsig_rrset_,
+ holder1.get()), rrclass);
+ checkRdataSet(*holder2.get(), def_rdata_txt_, def_rrsig_txt_);
+}
+
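The merge form of RdataSet::create() exercised above takes an existing (const)
RdataSet as its fifth argument and builds a new set combining the old data with
the given RRset/RRSIG, suppressing duplicates; the old set is left untouched.
A minimal fragment sketch (not part of this commit), assuming mem_sgmt,
encoder, extra_rrset and old_rdataset are set up as in the tests above:

    // Merge 'extra_rrset' into a new set based on 'old_rdataset'
    // (no RRSIG added in this fragment).
    RdataSet* merged = RdataSet::create(mem_sgmt, encoder, extra_rrset,
                                        ConstRRsetPtr(), old_rdataset);
    // ... use 'merged' ...
    RdataSet::destroy(mem_sgmt, merged, RRClass::IN());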
TEST_F(RdataSetTest, getNext) {
RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
ConstRRsetPtr());
@@ -192,8 +329,8 @@ TEST_F(RdataSetTest, find) {
}
// A helper function to create an RRset containing the given number of
-// unique RDATAs.
-ConstRRsetPtr
+// unique RDATAs. We return a non-const pointer so that we can extend it.
+RRsetPtr
getRRsetWithRdataCount(size_t rdata_count) {
RRsetPtr rrset(new RRset(Name("example.com"), RRClass::IN(), RRType::TXT(),
RRTTL(3600)));
@@ -204,48 +341,60 @@ getRRsetWithRdataCount(size_t rdata_count) {
return (rrset);
}
-TEST_F(RdataSetTest, createManyRRs) {
- // RRset with possible maximum number of RDATAs
- RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_,
- getRRsetWithRdataCount(8191),
- ConstRRsetPtr());
+void
+RdataSetTest::checkCreateManyRRs(CreateFn create_fn, size_t n_old_rdata) {
+ // RRset with possible maximum number of RDATAs, taking into account
+ // "pre-existing" RDATAs
+ RRsetPtr large_rrset = getRRsetWithRdataCount(8191 - n_old_rdata);
+ RdataSet* rdataset = create_fn(mem_sgmt_, encoder_, large_rrset,
+ ConstRRsetPtr());
EXPECT_EQ(8191, rdataset->getRdataCount());
EXPECT_EQ(0, rdataset->getSigRdataCount());
- RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Duplicate RDATA will be ignored in this check.
+ large_rrset->addRdata(createRdata(RRType::TXT(), rrclass, "0"));
+ rdataset = create_fn(mem_sgmt_, encoder_, large_rrset, ConstRRsetPtr());
+ EXPECT_EQ(8191, rdataset->getRdataCount());
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
// Exceeding that will result in an exception.
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_,
- getRRsetWithRdataCount(8192),
- ConstRRsetPtr()),
+ large_rrset->addRdata(createRdata(RRType::TXT(), rrclass, "8192"));
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, large_rrset, ConstRRsetPtr()),
RdataSetError);
// To be very sure even try larger number than the threshold
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_,
- getRRsetWithRdataCount(65535),
- ConstRRsetPtr()),
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_,
+ getRRsetWithRdataCount(65535 - n_old_rdata),
+ ConstRRsetPtr()),
RdataSetError);
}
+TEST_F(RdataSetTest, createManyRRs) {
+ checkCreateManyRRs(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ static_cast<const RdataSet*>(NULL)), 0);
+}
+
+TEST_F(RdataSetTest, mergeCreateManyRRs) {
+ ConstRRsetPtr rrset = textToRRset("example.com. 3600 IN TXT some-text");
+ SegmentObjectHolder<RdataSet, RRClass> holder(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_, rrset, ConstRRsetPtr()),
+ RRClass::IN());
+
+ checkCreateManyRRs(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ holder.get()), rrset->getRdataCount());
+}
+
TEST_F(RdataSetTest, createWithRRSIG) {
- // Normal case.
RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
rrsig_rrset_);
- checkRdataSet(*rdataset, true, true);
- RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
-
- // Unusual case: TTL doesn't match. This implementation accepts that,
- // using the TTL of the covered RRset.
- ConstRRsetPtr rrsig_badttl(textToRRset(
- "www.example.com. 3600 IN RRSIG "
- "A 5 2 3600 20120814220826 "
- "20120715220826 1234 example.com. FAKE"));
- rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_, rrsig_badttl);
- checkRdataSet(*rdataset, true, true);
+ checkRdataSet(*rdataset, def_rdata_txt_, def_rrsig_txt_);
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
}
// A helper function to create an RRSIG RRset containing the given number of
// unique RDATAs.
-ConstRRsetPtr
+RRsetPtr
getRRSIGWithRdataCount(size_t sig_count) {
RRsetPtr rrset(new RRset(Name("example.com"), RRClass::IN(),
RRType::RRSIG(), RRTTL(3600)));
@@ -269,77 +418,111 @@ getRRSIGWithRdataCount(size_t sig_count) {
return (rrset);
}
-TEST_F(RdataSetTest, createManyRRSIGs) {
+void
+RdataSetTest::checkCreateManyRRSIGs(CreateFn create_fn, size_t n_old_sig) {
// 7 has a special meaning in the implementation: if the number of the
// RRSIGs reaches this value, an extra 'sig count' field will be created.
- RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- getRRSIGWithRdataCount(7));
+ RdataSet* rdataset = create_fn(mem_sgmt_, encoder_, a_rrset_,
+ getRRSIGWithRdataCount(7 - n_old_sig));
EXPECT_EQ(7, rdataset->getSigRdataCount());
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
// 8 would cause overflow in the normal 3-bit field if there were no extra
// count field.
- rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- getRRSIGWithRdataCount(8));
+ rdataset = create_fn(mem_sgmt_, encoder_, a_rrset_,
+ getRRSIGWithRdataCount(8 - n_old_sig));
EXPECT_EQ(8, rdataset->getSigRdataCount());
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
// Up to 2^16-1 RRSIGs are allowed (although that would be useless
// in practice)
- rdataset = RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- getRRSIGWithRdataCount(65535));
+ RRsetPtr large_rrsig = getRRSIGWithRdataCount(65535 - n_old_sig);
+ rdataset = create_fn(mem_sgmt_, encoder_, a_rrset_, large_rrsig);
+ EXPECT_EQ(65535, rdataset->getSigRdataCount());
+ RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
+
+ // Duplicate shouldn't be counted
+ large_rrsig->addRdata(
+ createRdata(RRType::RRSIG(), rrclass,
+ "A 5 2 0 20120814220826 20120715220826 1234 "
+ "example.com. FAKE"));
+ rdataset = create_fn(mem_sgmt_, encoder_, a_rrset_, large_rrsig);
EXPECT_EQ(65535, rdataset->getSigRdataCount());
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
// Exceeding this limit will result in an exception.
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- getRRSIGWithRdataCount(65536)),
+ large_rrsig->addRdata(
+ createRdata(RRType::RRSIG(), rrclass,
+ "A 5 2 65536 20120814220826 20120715220826 1234 "
+ "example.com. FAKE"));
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, a_rrset_, large_rrsig),
RdataSetError);
// To be extra sure, also try a number much larger than the threshold
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- getRRSIGWithRdataCount(70000)),
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, a_rrset_,
+ getRRSIGWithRdataCount(70000 - n_old_sig)),
RdataSetError);
}
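
The 3-bit count plus extension scheme described in the comments above can be pictured with a small standalone sketch. The struct and names below are hypothetical and only illustrate the counting rule (values 0-6 stored directly, 7 acting as an "extended count follows" marker, anything above 2^16-1 rejected); they are not the actual RdataSet layout:

#include <cstddef>
#include <stdexcept>
#include <stdint.h>

struct SigCountSketch {
    SigCountSketch() : sig_count_field(0), ext_count(0) {}

    unsigned sig_count_field : 3;  // 0..6 stored directly; 7 == "see ext_count"
    uint16_t ext_count;            // only meaningful when sig_count_field == 7

    void setCount(size_t n) {
        if (n > 65535) {           // stand-in for the RdataSetError case above
            throw std::overflow_error("too many RRSIGs");
        }
        if (n >= 7) {
            sig_count_field = 7;
            ext_count = static_cast<uint16_t>(n);
        } else {
            sig_count_field = static_cast<unsigned>(n);
        }
    }
    size_t getCount() const {
        return (sig_count_field == 7 ? ext_count : sig_count_field);
    }
};
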
+TEST_F(RdataSetTest, createManyRRSIGs) {
+ checkCreateManyRRSIGs(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ static_cast<const RdataSet*>(NULL)), 0);
+}
+
+TEST_F(RdataSetTest, mergeCreateManyRRSIGs) {
+ // Create "old" RRSIG that shouldn't be a duplicate of ones created in
+ // checkCreateManyRRSIGs (signature is different).
+ ConstRRsetPtr rrsig = textToRRset(
+ "example.com. 3600 IN RRSIG A 5 2 3600 20120814220826 20120715220826 "
+ "1234 example.com. FAKEFAKE");
+ SegmentObjectHolder<RdataSet, RRClass> holder(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(), rrsig),
+ rrclass);
+
+ checkCreateManyRRSIGs(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ holder.get()), rrsig->getRdataCount());
+}
+
TEST_F(RdataSetTest, createWithRRSIGOnly) {
// A rare, but allowed, case: RdataSet without the main RRset but with
// RRSIG.
RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
rrsig_rrset_);
- checkRdataSet(*rdataset, false, true);
+ checkRdataSet(*rdataset, vector<string>(), def_rrsig_txt_);
RdataSet::destroy(mem_sgmt_, rdataset, RRClass::IN());
}
-TEST_F(RdataSetTest, badCeate) {
+// Checking initial validation for both versions of create().
+void
+RdataSetTest::checkBadCreate(CreateFn create_fn) {
// Neither the RRset nor RRSIG RRset is given
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
- ConstRRsetPtr()), isc::BadValue);
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, ConstRRsetPtr(),
+ ConstRRsetPtr()), isc::BadValue);
// Empty RRset (An RRset without RDATA)
ConstRRsetPtr empty_rrset(new RRset(Name("example.com"), RRClass::IN(),
RRType::A(), RRTTL(3600)));
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, empty_rrset,
- ConstRRsetPtr()), isc::BadValue);
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, empty_rrset,
+ ConstRRsetPtr()), isc::BadValue);
ConstRRsetPtr empty_rrsig(new RRset(Name("example.com"), RRClass::IN(),
RRType::RRSIG(), RRTTL(3600)));
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
- empty_rrsig), isc::BadValue);
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, ConstRRsetPtr(),
+ empty_rrsig), isc::BadValue);
// The RRset type and RRSIG's type covered don't match
ConstRRsetPtr bad_rrsig(textToRRset(
"www.example.com. 1076895760 IN RRSIG "
"NS 5 2 3600 20120814220826 20120715220826 "
"1234 example.com. FAKE"));
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, a_rrset_, bad_rrsig),
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, a_rrset_, bad_rrsig),
isc::BadValue);
// Pass non RRSIG for the sig parameter
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, a_rrset_, a_rrset_),
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, a_rrset_, a_rrset_),
isc::BadValue);
// Pass RRSIG for normal RRset (the RdataEncoder will catch this and throw)
- EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, rrsig_rrset_,
- rrsig_rrset_),
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, rrsig_rrset_, rrsig_rrset_),
isc::BadValue);
// RR class doesn't match between RRset and RRSIG
@@ -348,8 +531,99 @@ TEST_F(RdataSetTest, badCeate) {
"A 5 2 3600 20120814220826 "
"20120715220826 1234 example.com. FAKE",
RRClass::CH()));
+ EXPECT_THROW(create_fn(mem_sgmt_, encoder_, a_rrset_, badclass_rrsig),
+ isc::BadValue);
+}
+
+TEST_F(RdataSetTest, badCreate) {
+ checkBadCreate(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ static_cast<const RdataSet*>(NULL)));
+}
+
+TEST_F(RdataSetTest, badMergeCreate) {
+ // The 'old RdataSet' for merge. Its content doesn't matter much; the test
+ // should trigger an exception before examining it, except for the last checks.
+ SegmentObjectHolder<RdataSet, RRClass> holder(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_,
+ textToRRset("www.example.com. 0 IN AAAA 2001:db8::1"),
+ ConstRRsetPtr()),
+ RRClass::IN());
+
+ checkBadCreate(boost::bind(&RdataSet::create, _1, _2, _3, _4,
+ holder.get()));
+
+ // Type mismatch: this case is specific to the merge create.
EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, a_rrset_,
- badclass_rrsig),
+ ConstRRsetPtr(), holder.get()),
+ isc::BadValue);
+ EXPECT_THROW(RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
+ rrsig_rrset_, holder.get()),
isc::BadValue);
}
+
+TEST_F(RdataSetTest, varyingTTL) {
+ // Creating RdataSets with different TTLs. The lowest one should win.
+
+ ConstRRsetPtr aaaa_smaller = textToRRset("example. 5 IN AAAA 2001:db8::");
+ ConstRRsetPtr aaaa_small = textToRRset("example. 10 IN AAAA 2001:db8::1");
+ ConstRRsetPtr aaaa_large = textToRRset("example. 20 IN AAAA 2001:db8::2");
+ ConstRRsetPtr sig_smaller =
+ textToRRset("www.example.com. 5 IN RRSIG AAAA 5 2 3600 "
+ "20120814220826 20120715220826 1111 example.com. FAKE");
+ ConstRRsetPtr sig_small =
+ textToRRset("www.example.com. 10 IN RRSIG AAAA 5 2 3600 "
+ "20120814220826 20120715220826 1234 example.com. FAKE");
+ ConstRRsetPtr sig_large =
+ textToRRset("www.example.com. 20 IN RRSIG AAAA 5 2 3600 "
+ "20120814220826 20120715220826 4321 example.com. FAKE");
+
+ // RRSIG's TTL is larger
+ RdataSet* rdataset = RdataSet::create(mem_sgmt_, encoder_, aaaa_small,
+ sig_large);
+ EXPECT_EQ(RRTTL(10), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // RRSIG's TTL is smaller
+ SegmentObjectHolder<RdataSet, RRClass> holder1(
+ mem_sgmt_,
+ RdataSet::create(mem_sgmt_, encoder_, aaaa_large, sig_small), rrclass);
+ EXPECT_EQ(RRTTL(10), restoreTTL(holder1.get()->getTTLData()));
+
+ // Merging another RRset (w/o sig) that has larger TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, aaaa_large,
+ ConstRRsetPtr(), holder1.get());
+ EXPECT_EQ(RRTTL(10), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Merging another RRset (w/o sig) that has smaller TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, aaaa_smaller,
+ ConstRRsetPtr(), holder1.get());
+ EXPECT_EQ(RRTTL(5), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Merging another RRSIG (w/o RRset) that has larger TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
+ sig_large, holder1.get());
+ EXPECT_EQ(RRTTL(10), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Merging another RRSIG (w/o RRset) that has smaller TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, ConstRRsetPtr(),
+ sig_smaller, holder1.get());
+ EXPECT_EQ(RRTTL(5), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Merging another RRset and RRSIG that have larger TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, aaaa_large, sig_large,
+ holder1.get());
+ EXPECT_EQ(RRTTL(10), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+
+ // Merging another RRset and RRSIG that have smaller TTL
+ rdataset = RdataSet::create(mem_sgmt_, encoder_, aaaa_smaller, sig_smaller,
+ holder1.get());
+ EXPECT_EQ(RRTTL(5), restoreTTL(rdataset->getTTLData()));
+ RdataSet::destroy(mem_sgmt_, rdataset, rrclass);
+}
}
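
The varyingTTL expectations above all reduce to one rule: the resulting RdataSet TTL is the minimum of every TTL involved (the new RRset, its RRSIG, and the old RdataSet when merging). A tiny illustration of that rule, using hypothetical names and plain uint32_t TTLs rather than the wire-format TTL data the real code works with:

#include <algorithm>
#include <stdint.h>

uint32_t
mergedTTL(uint32_t old_ttl, uint32_t new_rrset_ttl, uint32_t new_rrsig_ttl) {
    return (std::min(old_ttl, std::min(new_rrset_ttl, new_rrsig_ttl)));
}

// e.g. mergedTTL(10, 20, 20) == 10 and mergedTTL(10, 5, 5) == 5, matching the
// 10- and 5-second expectations in the test above.
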
diff --git a/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc b/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
index 02ad2bd..921ca68 100644
--- a/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
+++ b/src/lib/datasrc/tests/memory/treenode_rrset_unittest.cc
@@ -194,8 +194,14 @@ checkBasicFields(const AbstractRRset& actual_rrset, const RdataSet* rdataset,
// a temporary non-copyable object.
boost::shared_ptr<TreeNodeRRset>
createRRset(const RRClass& rrclass, const ZoneNode* node,
- const RdataSet* rdataset, bool dnssec_ok)
+ const RdataSet* rdataset, bool dnssec_ok,
+ const void* ttl_data = NULL)
{
+ if (ttl_data) {
+ return (boost::shared_ptr<TreeNodeRRset>(
+ new TreeNodeRRset(rrclass, node, rdataset, dnssec_ok,
+ ttl_data)));
+ }
return (boost::shared_ptr<TreeNodeRRset>(
new TreeNodeRRset(rrclass, node, rdataset, dnssec_ok)));
}
@@ -243,6 +249,13 @@ TEST_F(TreeNodeRRsetTest, create) {
true),
wildcard_rdataset_, match_name_, rrclass_, RRType::A(),
3600, 2, 1);
+
+ // Constructed with explicit TTL
+ const uint32_t ttl = 0; // use 0 to avoid byte-order conversion
+ checkBasicFields(*createRRset(rrclass_, www_node_, a_rdataset_, true,
+ &ttl),
+ a_rdataset_, www_name_, rrclass_, RRType::A(), 0, 2,
+ 1);
}
// The following two templated functions are helper to encapsulate the
@@ -337,6 +350,25 @@ TEST_F(TreeNodeRRsetTest, toWire) {
}
{
+ SCOPED_TRACE("with RRSIG, DNSSEC OK, explicit TTL");
+ const uint32_t ttl = 0;
+ const TreeNodeRRset rrset(rrclass_, www_node_, a_rdataset_, true,
+ &ttl);
+ // We need separate variables for the following two: SunStudio cannot
+ // automatically promote RRsetPtr to ConstRRsetPtr in the templated
+ // function.
+ ConstRRsetPtr expected_rrset =
+ textToRRset("www.example.com. 0 IN A 192.0.2.1\n"
+ "www.example.com. 0 IN A 192.0.2.2");
+ ConstRRsetPtr expected_rrsig_rrset =
+ textToRRset("www.example.com. 0 IN RRSIG "
+ "A 5 2 3600 20120814220826 "
+ "20120715220826 1234 example.com. FAKE");
+ checkToWireResult(expected_renderer, actual_renderer, rrset, www_name_,
+ expected_rrset, expected_rrsig_rrset, true);
+ }
+
+ {
SCOPED_TRACE("with RRSIG, DNSSEC not OK");
const TreeNodeRRset rrset(rrclass_, www_node_, a_rdataset_, false);
checkToWireResult(expected_renderer, actual_renderer, rrset,
@@ -396,7 +428,7 @@ TEST_F(TreeNodeRRsetTest, toWire) {
const TreeNodeRRset rrset(rrclass_, www_node_, rrsig_only_rdataset_,
true);
checkToWireResult(expected_renderer, actual_renderer, rrset,
- www_name_, ConstRRsetPtr(), txt_rrsig_rrset_,true);
+ www_name_, ConstRRsetPtr(), txt_rrsig_rrset_, true);
}
{
@@ -407,7 +439,7 @@ TEST_F(TreeNodeRRsetTest, toWire) {
const TreeNodeRRset rrset(rrclass_, www_node_, rrsig_only_rdataset_,
false);
checkToWireResult(expected_renderer, actual_renderer, rrset,
- www_name_, ConstRRsetPtr(), txt_rrsig_rrset_,false);
+ www_name_, ConstRRsetPtr(), txt_rrsig_rrset_, false);
}
}
@@ -522,6 +554,14 @@ TEST_F(TreeNodeRRsetTest, toText) {
// Constructed with RRSIG, and it should be visible.
checkToText(*createRRset(rrclass_, www_node_, a_rdataset_, true),
a_rrset_, a_rrsig_rrset_);
+ // Same as the previous, but with explicit TTL.
+ const uint32_t ttl = 0;
+ checkToText(*createRRset(rrclass_, www_node_, a_rdataset_, true, &ttl),
+ textToRRset("www.example.com. 0 IN A 192.0.2.1\n"
+ "www.example.com. 0 IN A 192.0.2.2"),
+ textToRRset("www.example.com. 0 IN RRSIG A 5 2 3600 "
+ "20120814220826 20120715220826 1234 example.com. "
+ "FAKE"));
// Constructed with RRSIG, and it should be invisible.
checkToText(*createRRset(rrclass_, www_node_, a_rdataset_, false),
a_rrset_, ConstRRsetPtr());
@@ -556,6 +596,11 @@ TEST_F(TreeNodeRRsetTest, isSameKind) {
EXPECT_TRUE(rrset.isSameKind(*createRRset(rrclass_, www_node_,
a_rdataset_, true)));
+ // Similar to the previous, but with an explicit (different) TTL => still the same kind
+ const uint32_t ttl = 0;
+ EXPECT_TRUE(rrset.isSameKind(*createRRset(rrclass_, www_node_,
+ a_rdataset_, true, &ttl)));
+
// Same name (node), different type (rdataset) => not same kind
EXPECT_FALSE(rrset.isSameKind(*createRRset(rrclass_, www_node_,
aaaa_rdataset_, true)));
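
A note on the "use 0 to avoid byte-order conversion" trick in these tests: the explicit ttl_data argument is expected in wire (network byte) order, and 0 is the one value that looks identical in host and network order, so the tests can point at a host-order uint32_t directly. For any other value the data would first have to be serialized, roughly as in this sketch (assuming the standard htonl()):

#include <arpa/inet.h>
#include <stdint.h>

uint32_t
toWireTTL(uint32_t host_ttl) {
    return (htonl(host_ttl));   // htonl(0) == 0, hence the shortcut above
}

The zone_data tests further below do the reverse, turning the stored wire-format TTL back into an RRTTL via isc::util::InputBuffer.
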
diff --git a/src/lib/datasrc/tests/memory/zone_data_loader_unittest.cc b/src/lib/datasrc/tests/memory/zone_data_loader_unittest.cc
index c005bf1..abc6f13 100644
--- a/src/lib/datasrc/tests/memory/zone_data_loader_unittest.cc
+++ b/src/lib/datasrc/tests/memory/zone_data_loader_unittest.cc
@@ -12,13 +12,15 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <dns/name.h>
-#include <dns/rrclass.h>
-
+#include <datasrc/memory/zone_data_loader.h>
#include <datasrc/memory/rdataset.h>
#include <datasrc/memory/zone_data.h>
#include <datasrc/memory/zone_data_updater.h>
-#include <datasrc/memory/zone_data_loader.h>
+
+#include <util/buffer.h>
+
+#include <dns/name.h>
+#include <dns/rrclass.h>
#include "memory_segment_test.h"
@@ -62,4 +64,13 @@ TEST_F(ZoneDataLoaderTest, loadRRSIGFollowsNothing) {
// Teardown checks for memory segment leaks
}
+TEST_F(ZoneDataLoaderTest, zoneMinTTL) {
+ // This should hold outside of the loader class, but we double-check it here.
+ zone_data_ = loadZoneData(mem_sgmt_, zclass_, Name("example.org"),
+ TEST_DATA_DIR
+ "/example.org-nsec3-signed.zone");
+ isc::util::InputBuffer b(zone_data_->getMinTTLData(), sizeof(uint32_t));
+ EXPECT_EQ(RRTTL(1200), RRTTL(b));
+}
+
}
diff --git a/src/lib/datasrc/tests/memory/zone_data_unittest.cc b/src/lib/datasrc/tests/memory/zone_data_unittest.cc
index 1605fa2..ffbd0f6 100644
--- a/src/lib/datasrc/tests/memory/zone_data_unittest.cc
+++ b/src/lib/datasrc/tests/memory/zone_data_unittest.cc
@@ -12,19 +12,22 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <datasrc/memory/zone_data.h>
+#include <datasrc/memory/rdata_serialization.h>
+#include <datasrc/memory/rdataset.h>
+
#include "memory_segment_test.h"
#include <dns/rdataclass.h>
#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
#include <dns/name.h>
#include <dns/labelsequence.h>
#include <dns/rrclass.h>
-
-#include <datasrc/memory/rdata_serialization.h>
-#include <datasrc/memory/rdataset.h>
-#include <datasrc/memory/zone_data.h>
+#include <dns/rrttl.h>
#include <testutils/dnsmessage_test.h>
@@ -258,4 +261,21 @@ TEST_F(ZoneDataTest, isSigned) {
zone_data_->setSigned(false);
EXPECT_FALSE(zone_data_->isSigned());
}
+
+// A simple wrapper to reconstruct an RRTTL object from wire-format TTL
+// data (32 bits)
+RRTTL
+createRRTTL(const void* ttl_data) {
+ isc::util::InputBuffer b(ttl_data, sizeof(uint32_t));
+ return (RRTTL(b));
+}
+
+TEST_F(ZoneDataTest, minTTL) {
+ // By default it's tentatively set to "max TTL"
+ EXPECT_EQ(RRTTL::MAX_TTL(), createRRTTL(zone_data_->getMinTTLData()));
+
+ // Explicitly set, then retrieve it.
+ zone_data_->setMinTTL(1200);
+ EXPECT_EQ(RRTTL(1200), createRRTTL(zone_data_->getMinTTLData()));
+}
}
diff --git a/src/lib/datasrc/tests/memory/zone_data_updater_unittest.cc b/src/lib/datasrc/tests/memory/zone_data_updater_unittest.cc
index 63c69c8..93ca0c9 100644
--- a/src/lib/datasrc/tests/memory/zone_data_updater_unittest.cc
+++ b/src/lib/datasrc/tests/memory/zone_data_updater_unittest.cc
@@ -12,6 +12,10 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <datasrc/memory/zone_data_updater.h>
+#include <datasrc/memory/rdataset.h>
+#include <datasrc/memory/zone_data.h>
+
#include <testutils/dnsmessage_test.h>
#include <exceptions/exceptions.h>
@@ -19,10 +23,7 @@
#include <dns/name.h>
#include <dns/rrclass.h>
#include <dns/rrset.h>
-
-#include <datasrc/memory/rdataset.h>
-#include <datasrc/memory/zone_data.h>
-#include <datasrc/memory/zone_data_updater.h>
+#include <dns/rrttl.h>
#include "memory_segment_test.h"
@@ -86,6 +87,16 @@ getNode(isc::util::MemorySegment& mem_sgmt, const Name& name,
return (node);
}
+TEST_F(ZoneDataUpdaterTest, zoneMinTTL) {
+ // If we add an SOA, the zone's min TTL will be updated.
+ updater_->add(textToRRset(
+ "example.org. 3600 IN SOA . . 0 0 0 0 1200",
+ zclass_, zname_),
+ ConstRRsetPtr());
+ isc::util::InputBuffer b(zone_data_->getMinTTLData(), sizeof(uint32_t));
+ EXPECT_EQ(RRTTL(1200), RRTTL(b));
+}
+
TEST_F(ZoneDataUpdaterTest, rrsigOnly) {
// RRSIG that doesn't have covered RRset can be added. The resulting
// rdataset won't have "normal" RDATA but sig RDATA.
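
For context on the zoneMinTTL checks above and in the loader test: the expected value 1200 is the MINIMUM field of the test zone's SOA, and RFC 2308 defines the negative-cache TTL as the smaller of that field and the SOA's own TTL. A hedged sketch of that derivation (an assumption about the updater's rule, not a copy of its code):

#include <algorithm>
#include <stdint.h>

uint32_t
deriveMinTTL(uint32_t soa_ttl, uint32_t soa_minimum) {
    // e.g. min(3600, 1200) == 1200, matching EXPECT_EQ(RRTTL(1200), ...) above
    return (std::min(soa_ttl, soa_minimum));
}
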
diff --git a/src/lib/datasrc/tests/memory/zone_finder_unittest.cc b/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
index 42667da..e59013e 100644
--- a/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
+++ b/src/lib/datasrc/tests/memory/zone_finder_unittest.cc
@@ -53,19 +53,6 @@ namespace {
using result::SUCCESS;
using result::EXIST;
-/// \brief expensive rrset converter
-///
-/// converts any specialized rrset (which may not have implemented some
-/// methods for efficiency) into a 'full' RRsetPtr, for easy use in test
-/// checks.
-///
-/// Done very inefficiently through text representation, speed should not
-/// be a concern here.
-ConstRRsetPtr
-convertRRset(ConstRRsetPtr src) {
- return (textToRRset(src->toText()));
-}
-
/// \brief Test fixture for the InMemoryZoneFinder class
class InMemoryZoneFinderTest : public ::testing::Test {
// A straightforward pair of textual RR(set) and a RRsetPtr variable
@@ -105,7 +92,6 @@ protected:
ZoneFinder::FindResultFlags expected_flags =
ZoneFinder::RESULT_DEFAULT);
-public:
InMemoryZoneFinderTest() :
class_(RRClass::IN()),
origin_("example.org"),
@@ -119,13 +105,14 @@ public:
// Note that this contains an out-of-zone RR, and due to the
// validation check of masterLoad() used below, we cannot add SOA.
const RRsetData zone_data[] = {
+ {"example.org. 300 IN SOA . . 0 0 0 0 100", &rr_soa_},
{"example.org. 300 IN NS ns.example.org.", &rr_ns_},
{"example.org. 300 IN A 192.0.2.1", &rr_a_},
{"ns.example.org. 300 IN A 192.0.2.2", &rr_ns_a_},
// This one will place rr_ns_a_ at a zone cut, making it a glue:
- {"ns.example.org. 300 IN NS 192.0.2.2", &rr_ns_ns_},
+ {"ns.example.org. 300 IN NS 192.0.2.2.", &rr_ns_ns_},
{"ns.example.org. 300 IN AAAA 2001:db8::2", &rr_ns_aaaa_},
- {"cname.example.org. 300 IN CNAME canonical.example.org",
+ {"cname.example.org. 300 IN CNAME canonical.example.org.",
&rr_cname_},
{"cname.example.org. 300 IN A 192.0.2.3", &rr_cname_a_},
{"dname.example.org. 300 IN DNAME target.example.org.",
@@ -185,7 +172,18 @@ public:
};
for (unsigned int i = 0; zone_data[i].text != NULL; ++i) {
- *zone_data[i].rrset = textToRRset(zone_data[i].text);
+ if (zone_data[i].rrset == &rr_soa_) {
+ // This is the zone's SOA. We need to specify the origin for
+ // textToRRset; otherwise it would throw.
+ *zone_data[i].rrset = textToRRset(zone_data[i].text, class_,
+ origin_);
+ } else {
+ // For other data we omit the origin (the root name will be used
+ // by default); the data includes an out-of-zone name, which would
+ // trigger an exception if we specified origin_.
+ *zone_data[i].rrset = textToRRset(zone_data[i].text);
+ }
}
}
@@ -200,6 +198,24 @@ public:
updater_.add(rrset, rrset->getRRsig());
}
+ /// \brief expensive rrset converter
+ ///
+ /// converts any specialized rrset (which may not have implemented some
+ /// methods for efficiency) into a 'full' RRsetPtr, for easy use in test
+ /// checks.
+ ///
+ /// Done very inefficiently through text representation, speed should not
+ /// be a concern here.
+ ConstRRsetPtr
+ convertRRset(ConstRRsetPtr src) {
+ // If the type is SOA, textToRRset performs a stricter check, so we
+ // should specify the origin. For now we don't use out-of-zone
+ // owner names (e.g. for pathological cases) with this method, so it
+ // works for all test data. If future changes break this assumption
+ // we should adjust it.
+ return (textToRRset(src->toText(), class_, origin_));
+ }
+
// Some data to test with
const RRClass class_;
const Name origin_;
@@ -218,6 +234,8 @@ public:
RRsetPtr
// Out of zone RRset
rr_out_,
+ // SOA of example.org
+ rr_soa_,
// NS of example.org
rr_ns_,
// A of ns.example.org
@@ -293,75 +311,110 @@ public:
if (zone_finder == NULL) {
zone_finder = &zone_finder_;
}
- const ConstRRsetPtr answer_sig = answer ? answer->getRRsig() :
- RRsetPtr(); // note we use the same type as of retval of getRRsig()
// The whole block is inside, because we need to check the result and
// we can't assign to FindResult
EXPECT_NO_THROW({
ZoneFinderContextPtr find_result(zone_finder->find(
name, rrtype, options));
- // Check it returns correct answers
- EXPECT_EQ(result, find_result->code);
- EXPECT_EQ((expected_flags & ZoneFinder::RESULT_WILDCARD) != 0,
- find_result->isWildcard());
- EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED)
- != 0, find_result->isNSECSigned());
- EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC3_SIGNED)
- != 0, find_result->isNSEC3Signed());
- if (check_answer) {
- if (!answer) {
- ASSERT_FALSE(find_result->rrset);
- } else {
- ASSERT_TRUE(find_result->rrset);
- ConstRRsetPtr result_rrset(
- convertRRset(find_result->rrset));
- rrsetCheck(answer, result_rrset);
- if (answer_sig &&
- (options & ZoneFinder::FIND_DNSSEC) != 0) {
- ASSERT_TRUE(result_rrset->getRRsig());
- rrsetCheck(answer_sig, result_rrset->getRRsig());
- } else {
- EXPECT_FALSE(result_rrset->getRRsig());
- }
- }
- } else if (check_wild_answer) {
- ASSERT_NE(ConstRRsetPtr(), answer) <<
- "Wrong test, don't check for wild names if you expect "
- "empty answer";
- ASSERT_NE(ConstRRsetPtr(), find_result->rrset) <<
- "No answer found";
- // Build the expected answer using the given name and
- // other parameter of the base wildcard RRset.
- RRsetPtr wildanswer(new RRset(name, answer->getClass(),
- answer->getType(),
- answer->getTTL()));
- RdataIteratorPtr expectedIt(answer->getRdataIterator());
- for (; !expectedIt->isLast(); expectedIt->next()) {
- wildanswer->addRdata(expectedIt->getCurrent());
- }
-
- ConstRRsetPtr result_rrset(
- convertRRset(find_result->rrset));
- rrsetCheck(wildanswer, result_rrset);
-
- // Same for the RRSIG, if any.
- if (answer_sig) {
- ASSERT_TRUE(result_rrset->getRRsig());
-
- RRsetPtr wildsig(new RRset(name,
- answer_sig->getClass(),
- RRType::RRSIG(),
- answer_sig->getTTL()));
- RdataIteratorPtr expectedIt(
- answer_sig->getRdataIterator());
- for (; !expectedIt->isLast(); expectedIt->next()) {
- wildsig->addRdata(expectedIt->getCurrent());
- }
- rrsetCheck(wildsig, result_rrset->getRRsig());
- }
- }
+ findTestCommon(name, result, find_result, check_answer,
+ answer, expected_flags, options,
+ check_wild_answer);
});
}
+
+ void findAtOriginTest(const RRType& rrtype,
+ ZoneFinder::Result result,
+ bool check_answer = true,
+ const ConstRRsetPtr& answer = ConstRRsetPtr(),
+ ZoneFinder::FindResultFlags expected_flags =
+ ZoneFinder::RESULT_DEFAULT,
+ memory::InMemoryZoneFinder* zone_finder = NULL,
+ ZoneFinder::FindOptions options =
+ ZoneFinder::FIND_DEFAULT,
+ bool use_minttl = false)
+ {
+ SCOPED_TRACE("findAtOriginTest for " + rrtype.toText());
+
+ if (zone_finder == NULL) {
+ zone_finder = &zone_finder_;
+ }
+ ZoneFinderContextPtr find_result(zone_finder->findAtOrigin(
+ rrtype, use_minttl, options));
+ findTestCommon(origin_, result, find_result, check_answer, answer,
+ expected_flags, options, false);
+ }
+
+private:
+ void findTestCommon(const Name& name, ZoneFinder::Result result,
+ ZoneFinderContextPtr find_result,
+ bool check_answer,
+ const ConstRRsetPtr& answer,
+ ZoneFinder::FindResultFlags expected_flags,
+ ZoneFinder::FindOptions options,
+ bool check_wild_answer)
+ {
+ const ConstRRsetPtr answer_sig = answer ? answer->getRRsig() :
+ RRsetPtr(); // note we use the same type as of retval of getRRsig()
+
+ // Check it returns correct answers
+ EXPECT_EQ(result, find_result->code);
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_WILDCARD) != 0,
+ find_result->isWildcard());
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0,
+ find_result->isNSECSigned());
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC3_SIGNED) != 0,
+ find_result->isNSEC3Signed());
+ if (check_answer) {
+ if (!answer) {
+ ASSERT_FALSE(find_result->rrset);
+ } else {
+ ASSERT_TRUE(find_result->rrset);
+ ConstRRsetPtr result_rrset(convertRRset(find_result->rrset));
+ rrsetCheck(answer, result_rrset);
+ if (answer_sig && (options & ZoneFinder::FIND_DNSSEC) != 0) {
+ ASSERT_TRUE(result_rrset->getRRsig());
+ rrsetCheck(answer_sig, result_rrset->getRRsig());
+ } else {
+ EXPECT_FALSE(result_rrset->getRRsig());
+ }
+ }
+ } else if (check_wild_answer) {
+ ASSERT_NE(ConstRRsetPtr(), answer) <<
+ "Wrong test, don't check for wild names if you expect "
+ "empty answer";
+ ASSERT_NE(ConstRRsetPtr(), find_result->rrset) <<
+ "No answer found";
+ // Build the expected answer using the given name and
+ // other parameter of the base wildcard RRset.
+ RRsetPtr wildanswer(new RRset(name, answer->getClass(),
+ answer->getType(),
+ answer->getTTL()));
+ RdataIteratorPtr expectedIt(answer->getRdataIterator());
+ for (; !expectedIt->isLast(); expectedIt->next()) {
+ wildanswer->addRdata(expectedIt->getCurrent());
+ }
+
+ ConstRRsetPtr result_rrset(convertRRset(find_result->rrset));
+ rrsetCheck(wildanswer, result_rrset);
+
+ // Same for the RRSIG, if any.
+ if (answer_sig) {
+ ASSERT_TRUE(result_rrset->getRRsig());
+
+ RRsetPtr wildsig(new RRset(name, answer_sig->getClass(),
+ RRType::RRSIG(),
+ answer_sig->getTTL()));
+ RdataIteratorPtr expectedIt(
+ answer_sig->getRdataIterator());
+ for (; !expectedIt->isLast(); expectedIt->next()) {
+ wildsig->addRdata(expectedIt->getCurrent());
+ }
+ rrsetCheck(wildsig, result_rrset->getRRsig());
+ }
+ }
+ }
+
+protected:
/**
* \brief Calls the findAll on the finder and checks the result.
*/
@@ -583,7 +636,6 @@ TEST_F(InMemoryZoneFinderTest, glue) {
findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::DELEGATION,
true, rr_child_ns_);
-
// If we do it in the "glue OK" mode, we should find the exact match.
findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
rr_child_glue_, ZoneFinder::RESULT_DEFAULT, NULL,
@@ -619,6 +671,101 @@ TEST_F(InMemoryZoneFinderTest, glue) {
NULL, ZoneFinder::FIND_GLUE_OK);
}
+TEST_F(InMemoryZoneFinderTest, findAtOrigin) {
+ // Add origin NS.
+ rr_ns_->addRRsig(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "NS 5 3 3600 20120814220826 20120715220826 "
+ "1234 example.org. FAKE"));
+ addToZoneData(rr_ns_);
+
+ // Specified type of RR exists, no DNSSEC
+ findAtOriginTest(RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+
+ // Specified type of RR exists, with DNSSEC
+ findAtOriginTest(RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_,
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DNSSEC);
+
+ // Specified type of RR doesn't exist, no DNSSEC
+ findAtOriginTest(RRType::TXT(), ZoneFinder::NXRRSET);
+
+ // Specified type of RR doesn't exist, with DNSSEC. First, make the
+ // zone "NSEC-signed", then check.
+ rr_nsec_->addRRsig(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "NSEC 5 3 3600 20120814220826 "
+ "20120715220826 1234 example.org. FAKE"));
+ addToZoneData(rr_nsec_);
+ findAtOriginTest(RRType::TXT(), ZoneFinder::NXRRSET, true, rr_nsec_,
+ ZoneFinder::RESULT_NSEC_SIGNED, NULL,
+ ZoneFinder::FIND_DNSSEC);
+
+ // Specified type of RR doesn't exist, with DNSSEC, enabling NSEC3. First,
+ // make the zone "NSEC3-signed" (by just installing NSEC3PARAM; we don't
+ // need to add NSEC3s for the purpose of this test), then check.
+ addToZoneData(textToRRset("example.org. 300 IN NSEC3PARAM "
+ "1 0 12 aabbccdd"));
+ findAtOriginTest(RRType::TXT(), ZoneFinder::NXRRSET, true, ConstRRsetPtr(),
+ ZoneFinder::RESULT_NSEC3_SIGNED, NULL,
+ ZoneFinder::FIND_DNSSEC);
+}
+
+TEST_F(InMemoryZoneFinderTest, findAtOriginWithMinTTL) {
+ // Install the zone's SOA. This also sets the zone data's internal min TTL field.
+ addToZoneData(rr_soa_);
+
+ // Specify the use of min TTL, then the resulting TTL should be derived
+ // from the SOA MINTTL (which is smaller).
+ findAtOriginTest(RRType::SOA(), ZoneFinder::SUCCESS, true,
+ textToRRset("example.org. 100 IN SOA . . 0 0 0 0 100",
+ class_, origin_),
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
+
+ // Add signed NS for the following test.
+ RRsetPtr ns_rrset(textToRRset("example.org. 300 IN NS ns.example.org."));
+ ns_rrset->addRRsig(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "NS 5 3 3600 20120814220826 20120715220826 "
+ "1234 example.org. FAKE"));
+ addToZoneData(ns_rrset);
+
+ // If DNSSEC is requested, TTL of the RRSIG should also be the min.
+ ns_rrset->setTTL(RRTTL(100)); // reset TTL to the expected one
+ findAtOriginTest(RRType::NS(), ZoneFinder::SUCCESS, true, ns_rrset,
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
+
+ // If we don't request the use of min TTL, the original TTL will be used.
+ findAtOriginTest(RRType::SOA(), ZoneFinder::SUCCESS, true, rr_soa_,
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DEFAULT, false);
+
+ // If the found RRset has a smaller TTL than SOA, the original TTL should
+ // win.
+ rr_a_->setTTL(RRTTL(10));
+ addToZoneData(rr_a_);
+ findAtOriginTest(RRType::A(), ZoneFinder::SUCCESS, true, rr_a_,
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
+
+ // If no RRset is returned, use_minttl doesn't matter (it shouldn't cause
+ // disruption)
+ findAtOriginTest(RRType::TXT(), ZoneFinder::NXRRSET, true, ConstRRsetPtr(),
+ ZoneFinder::RESULT_DEFAULT, NULL,
+ ZoneFinder::FIND_DEFAULT, true);
+
+ // If it results in NXRRSET with NSEC, and if we specify the use of min
+ // TTL, the NSEC and RRSIG should have the min TTL (again, though, this
+ // use case is not really the intended one)
+ rr_nsec_->addRRsig(createRdata(RRType::RRSIG(), RRClass::IN(),
+ "NSEC 5 3 3600 20120814220826 "
+ "20120715220826 1234 example.org. FAKE"));
+ addToZoneData(rr_nsec_);
+ rr_nsec_->setTTL(RRTTL(100)); // reset it to the expected one
+ findAtOriginTest(RRType::TXT(), ZoneFinder::NXRRSET, true, rr_nsec_,
+ ZoneFinder::RESULT_NSEC_SIGNED, NULL,
+ ZoneFinder::FIND_DNSSEC, true);
+}
+
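
The findAtOriginWithMinTTL expectations above boil down to a simple selection: when use_minttl is requested, the returned TTL is capped at the zone's min TTL but never raised (the 10-second A record keeps its own TTL). A hypothetical helper expressing that rule:

#include <algorithm>
#include <stdint.h>

uint32_t
effectiveTTL(bool use_minttl, uint32_t rrset_ttl, uint32_t zone_min_ttl) {
    // cap at the zone min TTL only when requested; never increase the TTL
    return (use_minttl ? std::min(rrset_ttl, zone_min_ttl) : rrset_ttl);
}
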
/**
* \brief Test searching.
*
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 85be310..f7a478b 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -28,7 +28,7 @@
#include <datasrc/client.h>
#include <datasrc/memory_datasrc.h>
#include <datasrc/data_source.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include "test_client.h"
@@ -347,7 +347,7 @@ public:
{"example.org. 300 IN A 192.0.2.1", &rr_a_},
{"ns.example.org. 300 IN A 192.0.2.2", &rr_ns_a_},
{"ns.example.org. 300 IN AAAA 2001:db8::2", &rr_ns_aaaa_},
- {"cname.example.org. 300 IN CNAME canonical.example.org",
+ {"cname.example.org. 300 IN CNAME canonical.example.org.",
&rr_cname_},
{"cname.example.org. 300 IN A 192.0.2.3", &rr_cname_a_},
{"dname.example.org. 300 IN DNAME target.example.org.",
diff --git a/src/lib/datasrc/tests/zone_finder_context_unittest.cc b/src/lib/datasrc/tests/zone_finder_context_unittest.cc
index 85b167e..a5c8a8f 100644
--- a/src/lib/datasrc/tests/zone_finder_context_unittest.cc
+++ b/src/lib/datasrc/tests/zone_finder_context_unittest.cc
@@ -18,7 +18,7 @@
#include <dns/name.h>
#include <dns/rrclass.h>
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <datasrc/memory/memory_client.h>
#include <datasrc/memory/zone_table_segment.h>
#include <datasrc/database.h>
diff --git a/src/lib/datasrc/tests/zone_loader_unittest.cc b/src/lib/datasrc/tests/zone_loader_unittest.cc
index 0f44074..4b42185 100644
--- a/src/lib/datasrc/tests/zone_loader_unittest.cc
+++ b/src/lib/datasrc/tests/zone_loader_unittest.cc
@@ -14,6 +14,7 @@
#include <datasrc/zone_loader.h>
#include <datasrc/data_source.h>
+#include <datasrc/rrset_collection_base.h>
#include <datasrc/memory/zone_table_segment.h>
#include <datasrc/memory/memory_client.h>
@@ -163,24 +164,16 @@ public:
RRClass rrclass_;
};
-// Test implementation of RRsetCollectionBase.
+// Test implementation of RRsetCollectionBase. This is currently just a
+// wrapper around \c isc::datasrc::RRsetCollectionBase;
+// \c isc::datasrc::RRsetCollectionBase may become an abstract class in
+// the future.
class TestRRsetCollection : public isc::datasrc::RRsetCollectionBase {
public:
TestRRsetCollection(ZoneUpdater& updater,
const isc::dns::RRClass& rrclass) :
isc::datasrc::RRsetCollectionBase(updater, rrclass)
{}
-
- virtual ~TestRRsetCollection() {}
-
-protected:
- virtual RRsetCollectionBase::IterPtr getBeginning() {
- isc_throw(isc::NotImplemented, "This method is not implemented.");
- }
-
- virtual RRsetCollectionBase::IterPtr getEnd() {
- isc_throw(isc::NotImplemented, "This method is not implemented.");
- }
};
// The updater isn't really correct according to the API. For example,
@@ -197,7 +190,7 @@ public:
virtual ZoneFinder& getFinder() {
return (finder_);
}
- virtual isc::datasrc::RRsetCollectionBase& getRRsetCollection() {
+ virtual isc::dns::RRsetCollectionBase& getRRsetCollection() {
if (!rrset_collection_) {
rrset_collection_.reset(new TestRRsetCollection(*this,
client_->rrclass_));
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 01d6a83..ca73bb5 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -18,731 +18,18 @@
#include <dns/name.h>
#include <dns/rrset.h>
#include <dns/rrtype.h>
+#include <dns/rrset_collection_base.h>
#include <datasrc/exceptions.h>
#include <datasrc/result.h>
-#include <datasrc/rrset_collection_base.h>
#include <utility>
-#include <vector>
namespace isc {
namespace datasrc {
-/// \brief Out of zone exception
-///
-/// This is thrown when a method is called for a name or RRset which
-/// is not in or below the zone.
-class OutOfZone : public ZoneException {
-public:
- OutOfZone(const char* file, size_t line, const char* what) :
- ZoneException(file, line, what) {}
-};
-
-/// \brief The base class to search a zone for RRsets
-///
-/// The \c ZoneFinder class is an abstract base class for representing
-/// an object that performs DNS lookups in a specific zone accessible via
-/// a data source. In general, different types of data sources (in-memory,
-/// database-based, etc) define their own derived classes of \c ZoneFinder,
-/// implementing ways to retrieve the required data through the common
-/// interfaces declared in the base class. Each concrete \c ZoneFinder
-/// object is therefore (conceptually) associated with a specific zone
-/// of one specific data source instance.
-///
-/// The origin name and the RR class of the associated zone are available
-/// via the \c getOrigin() and \c getClass() methods, respectively.
-///
-/// The most important method of this class is \c find(), which performs
-/// the lookup for a given domain and type. See the description of the
-/// method for details.
-///
-/// \note It's not clear whether we should request that a zone finder form a
-/// "transaction", that is, whether to ensure the finder is not susceptible
-/// to changes made by someone else than the creator of the finder. If we
-/// don't request that, for example, two different lookup results for the
-/// same name and type can be different if other threads or programs make
-/// updates to the zone between the lookups. We should revisit this point
-/// as we gain more experiences.
-class ZoneFinder {
-public:
- /// Result codes of the \c find() method.
- ///
- /// Note: the codes are tentative. We may need more, or we may find
- /// some of them unnecessary as we implement more details.
- ///
- /// See the description of \c find() for further details of how
- /// these results should be interpreted.
- enum Result {
- SUCCESS, ///< An exact match is found.
- DELEGATION, ///< The search encounters a zone cut.
- NXDOMAIN, ///< There is no domain name that matches the search name
- NXRRSET, ///< There is a matching name but no RRset of the search type
- CNAME, ///< The search encounters and returns a CNAME RR
- DNAME ///< The search encounters and returns a DNAME RR
- };
-
- /// Special attribute flags on the result of the \c find() method
- ///
- /// The flag values defined here are intended to signal to the caller
- /// that it may need special handling on the result. This is particularly
- /// of concern when DNSSEC is requested. For example, for negative
- /// responses the caller would want to know whether the zone is signed
- /// with NSEC or NSEC3 so that it can subsequently provide necessary
- /// proof of the result.
- ///
- /// The caller is generally expected to get access to the information
- /// via read-only getter methods of \c FindContext so that it won't rely
- /// on specific details of the representation of the flags. So these
- /// definitions are basically only meaningful for data source
- /// implementations.
- enum FindResultFlags {
- RESULT_DEFAULT = 0, ///< The default flags
- RESULT_WILDCARD = 1, ///< find() resulted in a wildcard match
- RESULT_NSEC_SIGNED = 2, ///< The zone is signed with NSEC RRs
- RESULT_NSEC3_SIGNED = 4 ///< The zone is signed with NSEC3 RRs
- };
-
- /// Find options.
- ///
- /// The option values are used as a parameter for \c find().
- /// These are values of a bitmask type. Bitwise operations can be
- /// performed on these values to express compound options.
- enum FindOptions {
- FIND_DEFAULT = 0, ///< The default options
- FIND_GLUE_OK = 1, ///< Allow search under a zone cut
- FIND_DNSSEC = 2, ///< Require DNSSEC data in the answer
- ///< (RRSIG, NSEC, etc.). The implementation
- ///< is allowed to include it even if it is
- ///< not set.
- NO_WILDCARD = 4 ///< Do not try wildcard matching.
- };
-
-protected:
- /// \brief A convenient tuple representing a set of find() results.
- ///
- /// This helper structure is specifically expected to be used as an input
- /// for the construct of the \c Context class object used by derived
- /// ZoneFinder implementations. This is therefore defined as protected.
- struct ResultContext {
- ResultContext(Result code_param,
- isc::dns::ConstRRsetPtr rrset_param,
- FindResultFlags flags_param = RESULT_DEFAULT) :
- code(code_param), rrset(rrset_param), flags(flags_param)
- {}
- const Result code;
- const isc::dns::ConstRRsetPtr rrset;
- const FindResultFlags flags;
- };
-
-public:
- /// \brief A helper function to strip RRSIGs when FIND_DNSSEC is not
- /// requested.
- static isc::dns::ConstRRsetPtr
- stripRRsigs(isc::dns::ConstRRsetPtr rp, const FindOptions options);
-
- /// \brief Context of the result of a find() call.
- ///
- /// This class encapsulates results and (possibly) associated context
- /// of a call to the \c find() method. The public member variables of
- /// this class represent the result of the call. They are a
- /// straightforward tuple of the result code and a pointer (and
- /// optionally special flags) to the found RRset.
- ///
- /// These member variables will be initialized on construction and never
- /// change, so for convenience we allow the applications to refer to some
- /// of the members directly. For some others we provide read-only accessor
- /// methods to hide specific representation.
- ///
- /// Another role of this class is to provide the interface to some common
- /// processing logic that may be necessary using the result of \c find().
- /// Specifically, it's expected to be used in the context of DNS query
- /// handling, where the caller would need to look into the data source
- /// again based on the \c find() result. For example, it would need to
- /// get A and/or AAAA records for some of the answer or authority RRs.
- ///
- /// This class defines (a set of) method(s) that can be commonly used
- /// for such purposes for any type of data source (as long as it conforms
- /// to the public \c find() interface). In some cases, a specific data
- /// source implementation may want to (and can) optimize the processing
- /// exploiting its internal data structure and the knowledge of the context
- /// of the precedent \c find() call. Such a data source implementation
- /// can define a derived class of the base Context and override the
- /// specific virtual method.
- ///
- /// This base class defines these common protected methods along with
- /// some helper pure virtual methods that would be necessary for the
- /// common methods. If a derived class wants to use the common version
- /// of the protected method, it needs to provide expected result through
- /// their implementation of the pure virtual methods.
- ///
- /// This class object is generally expected to be associated with the
- /// ZoneFinder that originally performed the \c find() call, and expects
- /// the finder is valid throughout the lifetime of this object. It's
- /// caller's responsibility to ensure that assumption.
- class Context {
- public:
- /// \brief The constructor.
- ///
- /// \param options The find options specified for the find() call.
- /// \param result The result of the find() call.
- Context(FindOptions options, const ResultContext& result) :
- code(result.code), rrset(result.rrset),
- flags_(result.flags), options_(options)
- {}
-
- /// \brief The destructor.
- virtual ~Context() {}
-
- const Result code;
- const isc::dns::ConstRRsetPtr rrset;
-
- /// Return true iff find() results in a wildcard match.
- bool isWildcard() const { return ((flags_ & RESULT_WILDCARD) != 0); }
-
- /// Return true when the underlying zone is signed with NSEC.
- ///
- /// The \c find() implementation allows this to return false if
- /// \c FIND_DNSSEC isn't specified regardless of whether the zone
- /// is signed or which of NSEC/NSEC3 is used.
- ///
- /// When this is returned, the implementation of find() must ensure
- /// that \c rrset be a valid NSEC RRset as described in \c find()
- /// documentation.
- bool isNSECSigned() const {
- return ((flags_ & RESULT_NSEC_SIGNED) != 0);
- }
-
- /// Return true when the underlying zone is signed with NSEC3.
- ///
- /// The \c find() implementation allows this to return false if
- /// \c FIND_DNSSEC isn't specified regardless of whether the zone
- /// is signed or which of NSEC/NSEC3 is used.
- bool isNSEC3Signed() const {
- return ((flags_ & RESULT_NSEC3_SIGNED) != 0);
- }
-
- /// \brief Find and return additional RRsets corresponding to the
- /// result of \c find().
- ///
- /// If this context is based on a normal find() call that resulted
- /// in SUCCESS or DELEGATION, it examines the returned RRset (in many
- /// cases NS, sometimes MX or others), searches the data source for
- /// specified type of additional RRs for each RDATA of the RRset
- /// (e.g., A or AAAA for the name server addresses), and stores the
- /// result in the given vector. The vector may not be empty; this
- /// method appends any found RRsets to it, without touching existing
- /// elements.
- ///
- /// If this context is based on a findAll() call that resulted in
- /// SUCCESS, it performs the same process for each RRset returned in
- /// the \c findAll() call.
- ///
- /// The caller specifies desired RR types of the additional RRsets
- /// in \c requested_types. Normally it consists of A and/or AAAA
- /// types, but other types can be specified.
- ///
- /// This method is meaningful only when the precedent find()/findAll()
- /// call resulted in SUCCESS or DELEGATION. Otherwise this method
- /// does nothing.
- ///
- /// \note The additional RRsets returned via method are limited to
- /// ones contained in the zone which the corresponding find/findAll
- /// call searched (possibly including glues under a zone cut where
- /// they are applicable). If the caller needs to get out-of-zone
- /// additional RRsets, it needs to explicitly finds them by
- /// identifying the corresponding zone and calls \c find() for it.
- ///
- /// \param requested_types A vector of RR types for desired additional
- /// RRsets.
- /// \param result A vector to which any found additional RRsets are
- /// to be inserted.
- void getAdditional(
- const std::vector<isc::dns::RRType>& requested_types,
- std::vector<isc::dns::ConstRRsetPtr>& result)
- {
- // Perform common checks, and delegate the process to the default
- // or specialized implementation.
- if (code != SUCCESS && code != DELEGATION) {
- return;
- }
-
- getAdditionalImpl(requested_types, result);
- }
-
- protected:
- /// \brief Return the \c ZoneFinder that created this \c Context.
- ///
- /// A derived class implementation can return NULL if it defines
- /// other protected methods that require a non NULL result from
- /// this method. Otherwise it must return a valid, non NULL pointer
- /// to the \c ZoneFinder object.
- ///
- /// When returning non NULL, the ownership of the pointed object
- /// was not transferred to the caller; it cannot be assumed to be
- /// valid after the originating \c Context object is destroyed.
- /// Also, the caller must not try to delete the returned object.
- virtual ZoneFinder* getFinder() = 0;
-
- /// \brief Return a vector of RRsets corresponding to findAll() result.
- ///
- /// This method returns a set of RRsets that correspond to the
- /// returned RRsets to a prior \c findAll() call.
- ///
- /// A derived class implementation can return NULL if it defines
- /// other protected methods that require a non NULL result from
- /// this method. Otherwise it must return a valid, non NULL pointer
- /// to a vector that correspond to the expected set of RRsets.
- ///
- /// When returning non NULL, the ownership of the pointed object
- /// was not transferred to the caller; it cannot be assumed to be
- /// valid after the originating \c Context object is destroyed.
- /// Also, the caller must not try to delete the returned object.
- virtual const std::vector<isc::dns::ConstRRsetPtr>*
- getAllRRsets() const = 0;
-
- /// \brief Actual implementation of getAdditional().
- ///
- /// This base class defines a default implementation that can be
- /// used for any type of data sources. A data source implementation
- /// can override it.
- ///
- /// The default version of this implementation requires both
- /// \c getFinder() and \c getAllRRsets() return valid results.
- virtual void getAdditionalImpl(
- const std::vector<isc::dns::RRType>& requested_types,
- std::vector<isc::dns::ConstRRsetPtr>& result);
-
- private:
- const FindResultFlags flags_;
- protected:
- const FindOptions options_;
- };
-
- /// \brief Generic ZoneFinder context that works for all implementations.
- ///
- /// This is a concrete derived class of \c ZoneFinder::Context that
- /// only use the generic (default) versions of the protected methods
- /// and therefore work for any data source implementation.
- ///
- /// A data source implementation can use this class to create a
- /// \c Context object as a return value of \c find() or \c findAll()
- /// method if it doesn't have to optimize specific protected methods.
- class GenericContext : public Context {
- public:
- /// \brief The constructor for the normal find call.
- ///
- /// This constructor is expected to be called from the \c find()
- /// method when it constructs the return value.
- ///
- /// \param finder The ZoneFinder on which find() is called.
- /// \param options See the \c Context class.
- /// \param result See the \c Context class.
- GenericContext(ZoneFinder& finder, FindOptions options,
- const ResultContext& result) :
- Context(options, result), finder_(finder)
- {}
-
- /// \brief The constructor for the normal findAll call.
- ///
- /// This constructor is expected to be called from the \c findAll()
- /// method when it constructs the return value.
- ///
- /// It copies the vector that is to be returned to the caller of
- /// \c findAll() for possible subsequent use. Note that it cannot
- /// simply hold a reference to the vector because the caller may
- /// alter it after the \c findAll() call.
- ///
- /// \param finder The ZoneFinder on which findAll() is called.
- /// \param options See the \c Context class.
- /// \param result See the \c Context class.
- /// \param all_set Reference to the vector given by the caller of
- /// \c findAll(), storing the RRsets to be returned.
- GenericContext(ZoneFinder& finder, FindOptions options,
- const ResultContext& result,
- const std::vector<isc::dns::ConstRRsetPtr>& all_set) :
- Context(options, result), finder_(finder), all_set_(all_set)
- {}
-
- protected:
- virtual ZoneFinder* getFinder() { return (&finder_); }
- virtual const std::vector<isc::dns::ConstRRsetPtr>*
- getAllRRsets() const {
- return (&all_set_);
- }
-
- private:
- ZoneFinder& finder_;
- std::vector<isc::dns::ConstRRsetPtr> all_set_;
- };
-
- ///
- /// \name Constructors and Destructor.
- ///
- //@{
-protected:
- /// The default constructor.
- ///
- /// This is intentionally defined as \c protected as this base class should
- /// never be instantiated (except as part of a derived class).
- ZoneFinder() {}
-public:
- /// The destructor.
- virtual ~ZoneFinder() {}
- //@}
-
- ///
- /// \name Getter Methods
- ///
- /// These methods should never throw an exception.
- //@{
- /// Return the origin name of the zone.
- virtual isc::dns::Name getOrigin() const = 0;
-
- /// Return the RR class of the zone.
- virtual isc::dns::RRClass getClass() const = 0;
- //@}
-
- ///
- /// \name Search Methods
- ///
- //@{
- /// Search the zone for a given pair of domain name and RR type.
- ///
- /// Each derived version of this method searches the underlying backend
- /// for the data that best matches the given name and type.
- /// This method is expected to be "intelligent", and identifies the
- /// best possible answer for the search key. Specifically,
- ///
- /// - If the search name belongs under a zone cut, it returns the code
- /// of \c DELEGATION and the NS RRset at the zone cut.
- /// - If there is no matching name, it returns the code of \c NXDOMAIN.
- /// - If there is a matching name but no RRset of the search type, it
- /// returns the code of \c NXRRSET. This case includes the search name
- /// matches an empty node of the zone.
- /// - If there is a CNAME RR of the searched name but there is no
- /// RR of the searched type of the name (so this type is different from
- /// CNAME), it returns the code of \c CNAME and that CNAME RR.
- /// Note that if the searched RR type is CNAME, it is considered
- /// a successful match, and the code of \c SUCCESS will be returned.
- /// - If the search name matches a delegation point of DNAME, it returns
- /// the code of \c DNAME and that DNAME RR.
- ///
- /// No RRset will be returned in the \c NXDOMAIN and \c NXRRSET cases
- /// (\c rrset member of \c FindContext will be NULL), unless DNSSEC data
- /// are required. See below for the cases with DNSSEC.
- ///
- /// The returned \c FindContext object can also provide supplemental
- /// information about the search result via its methods returning a
- /// boolean value. Such information may be useful for the caller if
- /// the caller wants to collect additional DNSSEC proofs based on the
- /// search result.
- ///
- /// The \c options parameter specifies customized behavior of the search.
- /// Their semantics is as follows (they are or bit-field):
- ///
- /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
- /// will stop once it encounters a zone cut. If this option is specified
- /// it remembers information about the highest zone cut and continues
- /// the search until it finds an exact match for the given name or it
- /// detects there is no exact match. If an exact match is found,
- /// RRsets for that name are searched just like the normal case;
- /// otherwise, if the search has encountered a zone cut, \c DELEGATION
- /// with the information of the highest zone cut will be returned.
- /// Note: the term "glue" in the DNS protocol standard may sometimes
- /// cause confusion: some people use this term strictly for an address
- /// record (type AAAA or A) for the name used in the RDATA of an NS RR;
- /// some others seem to give it broader flexibility. Nevertheless,
- /// in this API the "GLUE OK" simply means the search by find() can
- /// continue beyond a zone cut; the derived class implementation does
- /// not have to, and should not, check whether the type is an address
- /// record or whether the query name is pointed by some NS RR.
- /// It's up to the caller with which definition of "glue" the search
- /// result with this option should be used.
- /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
- /// returned with the answer. It is allowed for the data source to
- /// include them even when not requested.
- /// - \c NO_WILDCARD Do not try wildcard matching. This option is of no
- /// use for normal lookups; it's intended to be used to get a DNSSEC
- /// proof of the non existence of any matching wildcard or non existence
- /// of an exact match when a wildcard match is found.
- ///
- /// In general, \c name is expected to be included in the zone, that is,
- /// it should be equal to or a subdomain of the zone origin. Otherwise
- /// this method will return \c NXDOMAIN with an empty RRset. But such a
- /// case should rather be considered a caller's bug.
- ///
- /// \note For this reason it's probably better to throw an exception
- /// than returning \c NXDOMAIN. This point should be revisited in a near
- /// future version. In any case applications shouldn't call this method
- /// for an out-of-zone name.
- ///
- /// <b>DNSSEC considerations:</b>
- /// The result when DNSSEC data are required can be very complicated,
- /// especially if it involves negative result or wildcard match.
- /// Specifically, if an application calls this method for DNS query
- /// processing with DNSSEC data, and if the search result code is
- /// either \c NXDOMAIN or \c NXRRRSET, and/or \c isWildcard() returns
- /// true, then the application will need to find additional NSEC or
- /// NSEC3 records for supplemental proofs. This method helps the
- /// application for such post search processing.
- ///
- /// First, it tells the application whether the zone is signed with
- /// NSEC or NSEC3 via the \c isNSEC(3)Signed() method. Any sanely signed
- /// zone should be signed with either (and only one) of these two types
- /// of RRs; however, the application should expect that the zone could
- /// be broken and these methods could both return false. But this method
- /// should ensure that not both of these methods return true.
- ///
- /// In case it's signed with NSEC3, there is no further information
- /// returned from this method.
- ///
- /// In case it's signed with NSEC, this method will possibly return
- /// a related NSEC RRset in the \c rrset member of \c FindContext.
- /// What kind of NSEC is returned depends on the result code
- /// (\c NXDOMAIN or \c NXRRSET) and on whether it's a wildcard match:
- ///
- /// - In case of NXDOMAIN, the returned NSEC covers the queried domain
- /// that proves that the query name does not exist in the zone. Note
- /// that this does not necessarily prove it doesn't even match a
- /// wildcard (even if the result of NXDOMAIN can only happen when
- /// there's no matching wildcard either). It is caller's
- /// responsibility to provide a proof that there is no matching
- /// wildcard if that proof is necessary.
- /// - In case of NXRRSET, we need to consider the following cases
- /// referring to Section 3.1.3 of RFC4035:
- ///
- /// -# (Normal) no data: there is a matching non-wildcard name with a
- /// different RR type. This is the "No Data" case of the RFC.
- /// -# (Normal) empty non terminal: there is no matching (exact or
- /// wildcard) name, but there is a subdomain with an RR of the query
- /// name. This is one case of "Name Error" of the RFC.
- /// -# Wildcard empty non terminal: similar to 2a, but the empty name
- /// is a wildcard, and matches the query name by wildcard expansion.
- /// This is a special case of "Name Error" of the RFC.
- /// -# Wildcard no data: there is no exact match name, but there is a
- /// wildcard name that matches the query name with a different type
- /// of RR. This is the "Wildcard No Data" case of the RFC.
- ///
- /// In case 1, \c find() returns NSEC of the matching name.
- ///
- /// In case 2, \c find() will return NSEC for the interval where the
- /// empty nonterminal lives. The end of the interval is the subdomain
- /// causing existence of the empty nonterminal (if there's
- /// sub.x.example.com, and no record in x.example.com, then
- /// x.example.com exists implicitly - is the empty nonterminal and
- /// sub.x.example.com is the subdomain causing it). Note that this NSEC
- /// proves not only the existence of empty non terminal name but also
- /// the non existence of possibly matching wildcard name, because
- /// there can be no better wildcard match than the exact matching empty
- /// name.
- ///
- /// In case 3, \c find() will return NSEC for the interval where the
- /// wildcard empty nonterminal lives. Cases 2 and 3 are especially
- /// complicated and confusing. See the examples below.
- ///
- /// In case 4, \c find() will return NSEC of the matching wildcard name.
- ///
- /// Examples: if zone "example.com" has the following record:
- /// \code
- /// a.example.com. NSEC a.b.example.com.
- /// \endcode
- /// a call to \c find() for "b.example.com." with the FIND_DNSSEC option
- /// will result in NXRRSET, and this NSEC will be returned.
- /// Likewise, if zone "example.org" has the following record,
- /// \code
- /// a.example.org. NSEC x.*.b.example.org.
- /// \endcode
- /// a call to \c find() for "y.b.example.org" with FIND_DNSSEC will
- /// result in NXRRSET and this NSEC; \c isWildcard() on the returned
- /// \c FindContext object will return true.
- ///
- /// \exception std::bad_alloc Memory allocation such as for constructing
- /// the resulting RRset fails
- /// \throw OutOfZone The Name \c name is outside of the origin of the
- /// zone of this ZoneFinder.
- /// \exception DataSourceError Derived class specific exception, e.g.
- /// when encountering a bad zone configuration or database connection
- /// failure. Although these are considered rare, exceptional events,
- /// it can happen under relatively usual conditions (unlike memory
- /// allocation failure). So, in general, the application is expected
- /// to catch this exception, either specifically or as a result of
- /// catching a base exception class, and handle it gracefully.
- ///
- /// \param name The domain name to be searched for.
- /// \param type The RR type to be searched for.
- /// \param options The search options.
- /// \return A \c FindContext object enclosing the search result
- /// (see above).
- virtual boost::shared_ptr<Context> find(const isc::dns::Name& name,
- const isc::dns::RRType& type,
- const FindOptions options
- = FIND_DEFAULT) = 0;
-
- ///
- /// \brief Finds all RRsets in the given name.
- ///
- /// This function works almost exactly the same way as \c find(). The
- /// only difference is that, when the lookup is successful (i.e. the code
- /// is SUCCESS), all the RRsets residing in the named node are
- /// copied into the \c target parameter and the rrset member of the result
- /// is NULL. All the other (unsuccessful) cases are handled the same,
- /// including returning delegations, NSEC/NSEC3 availability and NSEC
- /// proofs, wildcard information etc. The options parameter works the
- /// same way and it should conform to the same exception restrictions.
- ///
- /// \param name \see find, parameter name
- /// \param target the successful result is returned through this
- /// \param options \see find, parameter options
- /// \return \see find and its result
- virtual boost::shared_ptr<Context> findAll(
- const isc::dns::Name& name,
- std::vector<isc::dns::ConstRRsetPtr> &target,
- const FindOptions options = FIND_DEFAULT) = 0;
-
- /// A helper structure to represent the search result of \c findNSEC3().
- ///
- /// The idea is similar to that of \c FindContext, but since \c findNSEC3()
- /// has a special interface and semantics, we use a different structure to
- /// represent the result.
- struct FindNSEC3Result {
- FindNSEC3Result(bool param_matched, uint8_t param_closest_labels,
- isc::dns::ConstRRsetPtr param_closest_proof,
- isc::dns::ConstRRsetPtr param_next_proof) :
- matched(param_matched), closest_labels(param_closest_labels),
- closest_proof(param_closest_proof),
- next_proof(param_next_proof)
- {}
-
- /// true iff closest_proof is a matching NSEC3
- const bool matched;
-
- /// The number of labels of the identified closest encloser.
- const uint8_t closest_labels;
-
- /// Either the NSEC3 for the closest provable encloser of the given
- /// name or NSEC3 that covers the name
- const isc::dns::ConstRRsetPtr closest_proof;
-
- /// When non NULL, NSEC3 for the next closer name.
- const isc::dns::ConstRRsetPtr next_proof;
- };
-
- /// Search the zone for the NSEC3 RR(s) that prove the existence or non
- /// existence of a given name.
- ///
- /// It searches the NSEC3 namespace of the zone (how that namespace is
- /// implemented can vary in specific data source implementation) for NSEC3
- /// RRs that match or cover the NSEC3 hash value for the given name.
- ///
- /// If \c recursive is false, it will first look for the NSEC3 that has
- /// a matching hash. If it doesn't exist, it identifies the covering NSEC3
- /// for the hash. In either case the search stops at that point and the
- /// found NSEC3 RR(set) will be returned in the closest_proof member of
- /// \c FindNSEC3Result. \c matched is true or false depending on
- /// whether the found NSEC3 is a matching one or a covering one.
- /// \c next_proof is always NULL. closest_labels will be equal to the
- /// number of labels of \c name (and is therefore meaningless).
- ///
- /// If \c recursive is true, it will continue the search toward the zone
- /// apex (origin name) until it finds a provable encloser, that is,
- /// an ancestor of \c name that has a matching NSEC3. This is the closest
- /// provable encloser of \c name as defined in RFC5155. In this case,
- /// if the found encloser is not equal to \c name, the search should
- /// have seen a covering NSEC3 for the immediate child of the found
- /// encloser. That child name is the next closer name as defined in
- /// RFC5155. In this case, this method returns the NSEC3 for the
- /// closest encloser in \c closest_proof, and the NSEC3 for the next
- /// closer name in \c next_proof of \c FindNSEC3Result. This set of
- /// NSEC3 RRs provides the closest encloser proof as defined in RFC5155.
- /// closest_labels will be set to the number of labels of the identified
- /// closest encloser. This will be useful when the caller needs to
- /// construct the closest encloser name from the original \c name.
- /// If, on the other hand, the found closest name is equal to \c name,
- /// this method simply returns it in \c closest_proof. \c next_proof
- /// is set to NULL. In all cases \c matched is set to true.
- /// closest_labels will be set to the number of labels of \c name.
- ///
- /// When looking for NSEC3, this method retrieves NSEC3 parameters from
- /// the corresponding zone to calculate hash values. Actual implementation
- /// of how to do this will differ in different data sources. If the
- /// NSEC3 parameters are not available \c DataSourceError exception
- /// will be thrown.
- ///
- /// \note This implicitly means this method assumes the zone does not
- /// have more than one set of parameters. This assumption should be
- /// reasonable in actual deployment and will help simplify the interface
- /// and implementation. But if there's a real need for supporting
- /// multiple sets of parameters in a single zone, we will have to
- /// extend this method so that, e.g., the caller can specify the parameter
- /// set.
- ///
- /// In general, this method expects the zone is properly signed with NSEC3
- /// RRs. Specifically, it assumes at least the apex node has a matching
- /// NSEC3 RR (so the search in the recursive mode must always succeed);
- /// it also assumes that it can retrieve NSEC3 parameters (iterations,
- /// algorithm, and salt) from the zone as noted above. If these
- /// assumptions aren't met, \c DataSourceError exception will be thrown.
- ///
- /// \exception OutOfZone name is not a subdomain of the zone origin
- /// \exception DataSourceError Low-level or internal datasource errors
- /// happened, or the zone isn't properly signed with NSEC3
- /// (NSEC3 parameters cannot be found, no NSEC3s are available, etc).
- /// \exception std::bad_alloc The underlying implementation involves
- /// memory allocation and it fails
- ///
- /// \param name The name for which NSEC3 RRs are to be found. It must
- /// be a subdomain of the zone.
- /// \param recursive Whether or not search should continue until it finds
- /// a provable encloser (see above).
- ///
- /// \return The search result and whether or not the closest_proof is
- /// a matching NSEC3, in the form of \c FindNSEC3Result object.
- virtual FindNSEC3Result
- findNSEC3(const isc::dns::Name& name, bool recursive) = 0;
- //@}
-};
-
-/// \brief Operator to combine FindOptions
-///
-/// We would need to manually static-cast the options if we put or
-/// between them, which is undesired with bit-flag options. Therefore
-/// we hide the cast here, which is the simplest solution and it still
-/// provides reasonable level of type safety.
-inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
- ZoneFinder::FindOptions b)
-{
- return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
- static_cast<unsigned>(b)));
-}
-
-/// \brief Operator to combine FindResultFlags
-///
-/// Similar to the same operator for \c FindOptions. Refer to the description
-/// of that function.
-inline ZoneFinder::FindResultFlags operator |(
- ZoneFinder::FindResultFlags a,
- ZoneFinder::FindResultFlags b)
-{
- return (static_cast<ZoneFinder::FindResultFlags>(
- static_cast<unsigned>(a) | static_cast<unsigned>(b)));
-}
-
-/// \brief A pointer-like type pointing to a \c ZoneFinder object.
-typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
-
-/// \brief A pointer-like type pointing to an immutable \c ZoneFinder object.
-typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
-
-/// \brief A pointer-like type pointing to a \c ZoneFinder::Context object.
-typedef boost::shared_ptr<ZoneFinder::Context> ZoneFinderContextPtr;
-
-/// \brief A pointer-like type pointing to an immutable
-/// \c ZoneFinder::Context object.
-typedef boost::shared_ptr<ZoneFinder::Context> ConstZoneFinderContextPtr;
-
/// \brief A forward declaration
-class RRsetCollectionBase;
+class ZoneFinder;
/// The base class to make updates to a single zone.
///
@@ -809,7 +96,7 @@ public:
/// Return an RRsetCollection for the updater.
///
/// This method returns an \c RRsetCollection for the updater,
- /// implementing the \c isc::datasrc::RRsetCollectionBase
+ /// implementing the \c isc::dns::RRsetCollectionBase
/// interface. Typically, the returned \c RRsetCollection is a
/// singleton for its \c ZoneUpdater. The returned RRsetCollection
/// object must not be used after its corresponding \c ZoneUpdater
@@ -819,15 +106,51 @@ public:
/// \c ZoneUpdater implementation.
///
/// The behavior of the RRsetCollection is similar to the behavior
- /// of the \c Zonefinder returned by \c getFinder().
+ /// of the \c ZoneFinder returned by \c getFinder(). In fact, it's
+ /// redundant in a sense because one can implement the
+ /// \c dns::RRsetCollectionBase interface using an updater and
+ /// \c getFinder() interface (unless it's expected to support zone
+ /// iteration, and the initial implementation of the \c RRsetCollection
+ /// returned by this method doesn't support it). We still provide it
+ /// as an updater's method so it will be easier for an updater
+ /// implementation to customize the \c RRsetCollection implementation,
+ /// and also for making it easy to impose restrictions described below.
+ ///
+ /// Specific data sources may have special restrictions. That's
+ /// especially the case for database-based data sources. Such
+ /// restrictions may also result in limiting the usage of the
+ /// \c RRsetCollection as described in the following paragraphs. A
+ /// specific updater implementation may provide more flexible
+ /// behavior, but an application using this interface must assume
+ /// the most restricted case unless it knows it uses a particular
+ /// specialized updater implementation that loosens specific restrictions.
+ ///
+ /// To summarize the restrictions:
+ /// - An application must not add or delete RRsets after
+ /// \c getRRsetCollection() is called.
+ /// - An application must not use the returned collection from
+ /// \c getRRsetCollection() once \c commit() is called on the updater
+ /// that generates the collection.
+ ///
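+ /// For example, the expected ordering of calls looks like this (a
+ /// minimal sketch; \c updater is assumed to be a valid \c ZoneUpdater
+ /// and all additions and deletions are assumed to have been made
+ /// already):
+ /// \code
+ /// isc::dns::RRsetCollectionBase& collection =
+ ///     updater->getRRsetCollection();
+ /// // ... use collection here; do not add or delete RRsets any more ...
+ /// updater->commit();
+ /// // ... the collection must not be used beyond this point ...
+ /// \endcode
+ ///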
/// Implementations of \c ZoneUpdater may not allow adding or
- /// deleting RRsets after \c getRRsetCollection() is called.
- /// Implementations of \c ZoneUpdater may disable a previously
- /// returned \c RRsetCollection after \c commit() is called. If an
- /// \c RRsetCollection is disabled, using methods such as \c find()
- /// and using its iterator would cause an exception to be
- /// thrown. See \c isc::datasrc::RRsetCollectionBase for details.
- virtual isc::datasrc::RRsetCollectionBase& getRRsetCollection() = 0;
+ /// deleting RRsets after \c getRRsetCollection() is called. This is
+ /// because if an iterator of the collection is being used at that time
+ /// the modification to the zone may break an internal assumption of the
+ /// iterator and may result in unexpected behavior. Also, the iterator
+ /// may conceptually hold a "reader lock" of the zone (in an implementation
+ /// dependent manner), which would prevent the addition or deletion,
+ /// surprising the caller (who would normally expect it to succeed).
+ ///
+ /// Implementations of \c ZoneUpdater may disable a previously returned
+ /// \c RRsetCollection after \c commit() is called. This is because
+ /// the returned \c RRsetCollection may internally rely on the conceptual
+ /// transaction of the updater that generates the collection (which would
+ /// be literally the case for database-based data sources), and once
+ /// the transaction is committed anything that relies on it won't be valid.
+ /// If an \c RRsetCollection is disabled, using methods such as \c find()
+ /// and using its iterator would cause an exception to be thrown. See
+ /// \c isc::datasrc::RRsetCollectionBase for details.
+ virtual isc::dns::RRsetCollectionBase& getRRsetCollection() = 0;
/// Add an RRset to a zone via the updater
///
@@ -877,8 +200,9 @@ public:
/// \c DataSourceError exception.
///
/// Implementations of \c ZoneUpdater may not allow adding or
- /// deleting RRsets after \c getRRsetCollection() is called. In this
- /// case, implementations throw an \c InvalidOperation exception.
+ /// deleting RRsets after \c getRRsetCollection() is called (see
+ /// the description of \c getRRsetCollection()). In this case,
+ /// implementations throw an \c InvalidOperation exception.
///
/// If journaling was requested when getting this updater, it will refuse
/// to add the RRset if the sequence doesn't look like an IXFR (see
@@ -952,8 +276,9 @@ public:
/// \c DataSourceError exception.
///
/// Implementations of \c ZoneUpdater may not allow adding or
- /// deleting RRsets after \c getRRsetCollection() is called. In this
- /// case, implementations throw an \c InvalidOperation exception.
+ /// deleting RRsets after \c getRRsetCollection() is called (see
+ /// the description of \c getRRsetCollection()). In this case,
+ /// implementations throw an \c InvalidOperation exception.
///
/// If journaling was requested when getting this updater, it will refuse
/// to add the RRset if the sequence doesn't look like an IXFR (see
diff --git a/src/lib/datasrc/zone_finder.cc b/src/lib/datasrc/zone_finder.cc
index 562b43f..b4240c0 100644
--- a/src/lib/datasrc/zone_finder.cc
+++ b/src/lib/datasrc/zone_finder.cc
@@ -12,13 +12,14 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <datasrc/zone_finder.h>
+#include <datasrc/data_source.h>
+
#include <dns/rdata.h>
#include <dns/rrset.h>
#include <dns/rrtype.h>
#include <dns/rdataclass.h>
-#include <datasrc/zone.h>
-
using namespace std;
using namespace isc::dns;
using namespace isc::dns::rdata;
@@ -26,6 +27,96 @@ using namespace isc::dns::rdata;
namespace isc {
namespace datasrc {
+namespace {
+// Identify zone's SOA and return its MINTTL in the form of RRTTL.
+RRTTL
+getMinTTL(ZoneFinder& finder, ConstRRsetPtr rrset) {
+ ConstRRsetPtr soa_rrset;
+ if (rrset->getType() == RRType::SOA()) {
+ // Shortcut: if we are looking at SOA itself (which should be the
+ // case in the expected scenario), we can simply use its RDATA.
+ soa_rrset = rrset;
+ } else {
+ soa_rrset =
+ finder.findAtOrigin(RRType::SOA(), false,
+ ZoneFinder::FIND_DEFAULT)->rrset;
+ }
+
+ // In a valid zone there is one and only one SOA RR at the origin.
+ // Otherwise either zone data or the data source implementation is broken.
+ if (!soa_rrset || soa_rrset->getRdataCount() != 1) {
+ isc_throw(DataSourceError, "Zone " << rrset->getName().toText(true)
+ << "/" << rrset->getClass().toText() << " is broken: "
+ << (!soa_rrset ? "no SOA" : "empty SOA"));
+ }
+
+ return (RRTTL(dynamic_cast<const generic::SOA&>(
+ soa_rrset->getRdataIterator()->getCurrent()).
+ getMinimum()));
+}
+
+// Make a fresh copy of given RRset, just replacing RRTTL with the given one.
+RRsetPtr
+copyRRset(const AbstractRRset& rrset, const RRTTL& ttl) {
+ RRsetPtr rrset_copy(new RRset(rrset.getName(), rrset.getClass(),
+ rrset.getType(), ttl));
+ for (RdataIteratorPtr rit = rrset.getRdataIterator();
+ !rit->isLast();
+ rit->next()) {
+ rrset_copy->addRdata(rit->getCurrent());
+ }
+
+ ConstRRsetPtr rrsig = rrset.getRRsig();
+ if (rrsig) {
+ RRsetPtr rrsig_copy(new RRset(rrset.getName(), rrset.getClass(),
+ RRType::RRSIG(), ttl));
+ for (RdataIteratorPtr rit = rrsig->getRdataIterator();
+ !rit->isLast();
+ rit->next()) {
+ rrsig_copy->addRdata(rit->getCurrent());
+ }
+ rrset_copy->addRRsig(rrsig_copy);
+ }
+
+ return (rrset_copy);
+}
+}
+
+ZoneFinderContextPtr
+ZoneFinder::findAtOrigin(const dns::RRType& type, bool use_minttl,
+ FindOptions options)
+{
+ ZoneFinderContextPtr context = find(getOrigin(), type, options);
+
+ // If we are requested to use the min TTL and the RRset's RR TTL is larger
+ // than that, we need to make a copy of the RRset, replacing the TTL,
+ // and return a newly created context copying other parameters.
+ if (use_minttl && context->rrset) {
+ const AbstractRRset& rrset = *context->rrset;
+ const RRTTL min_ttl = getMinTTL(*this, context->rrset);
+ if (min_ttl < rrset.getTTL()) {
+ FindResultFlags flags_copy = RESULT_DEFAULT;
+ if (context->isWildcard()) {
+ flags_copy = flags_copy | RESULT_WILDCARD;
+ }
+ if (context->isNSECSigned()) {
+ flags_copy = flags_copy | RESULT_NSEC_SIGNED;
+ } else if (context->isNSEC3Signed()) {
+ flags_copy = flags_copy | RESULT_NSEC3_SIGNED;
+ }
+
+ return (ZoneFinderContextPtr(
+ new GenericContext(*this, options,
+ ResultContext(context->code,
+ copyRRset(rrset,
+ min_ttl),
+ flags_copy))));
+ }
+ }
+
+ return (context);
+}
+
isc::dns::ConstRRsetPtr
ZoneFinder::stripRRsigs(isc::dns::ConstRRsetPtr rp,
const FindOptions options) {
diff --git a/src/lib/datasrc/zone_finder.h b/src/lib/datasrc/zone_finder.h
new file mode 100644
index 0000000..83851f6
--- /dev/null
+++ b/src/lib/datasrc/zone_finder.h
@@ -0,0 +1,808 @@
+// Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DATASRC_ZONE_FINDER_H
+#define DATASRC_ZONE_FINDER_H 1
+
+#include <dns/name.h>
+#include <dns/rrset.h>
+#include <dns/rrtype.h>
+
+#include <datasrc/exceptions.h>
+#include <datasrc/result.h>
+
+#include <utility>
+#include <vector>
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Out of zone exception
+///
+/// This is thrown when a method is called for a name or RRset which
+/// is not in or below the zone.
+class OutOfZone : public ZoneException {
+public:
+ OutOfZone(const char* file, size_t line, const char* what) :
+ ZoneException(file, line, what) {}
+};
+
+/// \brief The base class to search a zone for RRsets
+///
+/// The \c ZoneFinder class is an abstract base class for representing
+/// an object that performs DNS lookups in a specific zone accessible via
+/// a data source. In general, different types of data sources (in-memory,
+/// database-based, etc) define their own derived classes of \c ZoneFinder,
+/// implementing ways to retrieve the required data through the common
+/// interfaces declared in the base class. Each concrete \c ZoneFinder
+/// object is therefore (conceptually) associated with a specific zone
+/// of one specific data source instance.
+///
+/// The origin name and the RR class of the associated zone are available
+/// via the \c getOrigin() and \c getClass() methods, respectively.
+///
+/// The most important method of this class is \c find(), which performs
+/// the lookup for a given domain and type. See the description of the
+/// method for details.
+///
+/// \note It's not clear whether we should request that a zone finder form a
+/// "transaction", that is, whether to ensure the finder is not susceptible
+/// to changes made by someone other than the creator of the finder. If we
+/// don't request that, for example, two lookups for the same name and type
+/// can return different results if other threads or programs update the
+/// zone between the lookups. We should revisit this point as we gain more
+/// experience.
+class ZoneFinder {
+public:
+ /// Result codes of the \c find() method.
+ ///
+ /// Note: the codes are tentative. We may need more, or we may find
+ /// some of them unnecessary as we implement more details.
+ ///
+ /// See the description of \c find() for further details of how
+ /// these results should be interpreted.
+ enum Result {
+ SUCCESS, ///< An exact match is found.
+ DELEGATION, ///< The search encounters a zone cut.
+ NXDOMAIN, ///< There is no domain name that matches the search name
+ NXRRSET, ///< There is a matching name but no RRset of the search type
+ CNAME, ///< The search encounters and returns a CNAME RR
+ DNAME ///< The search encounters and returns a DNAME RR
+ };
+
+ /// Special attribute flags on the result of the \c find() method
+ ///
+ /// The flag values defined here are intended to signal to the caller
+ /// that it may need special handling on the result. This is particularly
+ /// of concern when DNSSEC is requested. For example, for negative
+ /// responses the caller would want to know whether the zone is signed
+ /// with NSEC or NSEC3 so that it can subsequently provide necessary
+ /// proof of the result.
+ ///
+ /// The caller is generally expected to get access to the information
+ /// via read-only getter methods of \c FindContext so that it won't rely
+ /// on specific details of the representation of the flags. So these
+ /// definitions are basically only meaningful for data source
+ /// implementations.
+ enum FindResultFlags {
+ RESULT_DEFAULT = 0, ///< The default flags
+ RESULT_WILDCARD = 1, ///< find() resulted in a wildcard match
+ RESULT_NSEC_SIGNED = 2, ///< The zone is signed with NSEC RRs
+ RESULT_NSEC3_SIGNED = 4 ///< The zone is signed with NSEC3 RRs
+ };
+
+ /// Find options.
+ ///
+ /// The option values are used as a parameter for \c find().
+ /// These are values of a bitmask type. Bitwise operations can be
+ /// performed on these values to express compound options.
+ enum FindOptions {
+ FIND_DEFAULT = 0, ///< The default options
+ FIND_GLUE_OK = 1, ///< Allow search under a zone cut
+ FIND_DNSSEC = 2, ///< Require DNSSEC data in the answer
+ ///< (RRSIG, NSEC, etc.). The implementation
+ ///< is allowed to include it even if it is
+ ///< not set.
+ NO_WILDCARD = 4 ///< Do not try wildcard matching.
+ };
+
+protected:
+ /// \brief A convenient tuple representing a set of find() results.
+ ///
+ /// This helper structure is specifically expected to be used as an input
+ /// for the construct of the \c Context class object used by derived
+ /// ZoneFinder implementations. This is therefore defined as protected.
+ struct ResultContext {
+ ResultContext(Result code_param,
+ isc::dns::ConstRRsetPtr rrset_param,
+ FindResultFlags flags_param = RESULT_DEFAULT) :
+ code(code_param), rrset(rrset_param), flags(flags_param)
+ {}
+ const Result code;
+ const isc::dns::ConstRRsetPtr rrset;
+ const FindResultFlags flags;
+ };
+
+public:
+ /// \brief A helper function to strip RRSIGs when FIND_DNSSEC is not
+ /// requested.
+ static isc::dns::ConstRRsetPtr
+ stripRRsigs(isc::dns::ConstRRsetPtr rp, const FindOptions options);
+
+ /// \brief Context of the result of a find() call.
+ ///
+ /// This class encapsulates results and (possibly) associated context
+ /// of a call to the \c find() method. The public member variables of
+ /// this class represent the result of the call. They are a
+ /// straightforward tuple of the result code, a pointer to the found
+ /// RRset, and optionally special flags.
+ ///
+ /// These member variables will be initialized on construction and never
+ /// change, so for convenience we allow the applications to refer to some
+ /// of the members directly. For some others we provide read-only accessor
+ /// methods to hide specific representation.
+ ///
+ /// Another role of this class is to provide the interface to some common
+ /// processing logic that may be necessary when using the result of \c find().
+ /// Specifically, it's expected to be used in the context of DNS query
+ /// handling, where the caller would need to look into the data source
+ /// again based on the \c find() result. For example, it would need to
+ /// get A and/or AAAA records for some of the answer or authority RRs.
+ ///
+ /// This class defines (a set of) method(s) that can be commonly used
+ /// for such purposes for any type of data source (as long as it conforms
+ /// to the public \c find() interface). In some cases, a specific data
+ /// source implementation may want to (and can) optimize the processing
+ /// exploiting its internal data structure and the knowledge of the context
+ /// of the preceding \c find() call. Such a data source implementation
+ /// can define a derived class of the base Context and override the
+ /// specific virtual method.
+ ///
+ /// This base class defines these common protected methods along with
+ /// some helper pure virtual methods that would be necessary for the
+ /// common methods. If a derived class wants to use the common version
+ /// of a protected method, it needs to provide the expected results
+ /// through its implementation of the pure virtual methods.
+ ///
+ /// This class object is generally expected to be associated with the
+ /// ZoneFinder that originally performed the \c find() call, and expects
+ /// the finder to remain valid throughout the lifetime of this object.
+ /// It's the caller's responsibility to ensure that assumption.
+ class Context {
+ public:
+ /// \brief The constructor.
+ ///
+ /// \param options The find options specified for the find() call.
+ /// \param result The result of the find() call.
+ Context(FindOptions options, const ResultContext& result) :
+ code(result.code), rrset(result.rrset),
+ flags_(result.flags), options_(options)
+ {}
+
+ /// \brief The destructor.
+ virtual ~Context() {}
+
+ const Result code;
+ const isc::dns::ConstRRsetPtr rrset;
+
+ /// Return true iff find() results in a wildcard match.
+ bool isWildcard() const { return ((flags_ & RESULT_WILDCARD) != 0); }
+
+ /// Return true when the underlying zone is signed with NSEC.
+ ///
+ /// The \c find() implementation allows this to return false if
+ /// \c FIND_DNSSEC isn't specified regardless of whether the zone
+ /// is signed or which of NSEC/NSEC3 is used.
+ ///
+ /// When this returns true, the implementation of find() must ensure
+ /// that \c rrset is a valid NSEC RRset as described in the \c find()
+ /// documentation.
+ bool isNSECSigned() const {
+ return ((flags_ & RESULT_NSEC_SIGNED) != 0);
+ }
+
+ /// Return true when the underlying zone is signed with NSEC3.
+ ///
+ /// The \c find() implementation allows this to return false if
+ /// \c FIND_DNSSEC isn't specified regardless of whether the zone
+ /// is signed or which of NSEC/NSEC3 is used.
+ bool isNSEC3Signed() const {
+ return ((flags_ & RESULT_NSEC3_SIGNED) != 0);
+ }
+
+ /// \brief Find and return additional RRsets corresponding to the
+ /// result of \c find().
+ ///
+ /// If this context is based on a normal find() call that resulted
+ /// in SUCCESS or DELEGATION, it examines the returned RRset (in many
+ /// cases NS, sometimes MX or others), searches the data source for
+ /// the specified types of additional RRs for each RDATA of the RRset
+ /// (e.g., A or AAAA for the name server addresses), and stores the
+ /// result in the given vector. The vector does not have to be empty;
+ /// this method appends any found RRsets to it, without touching existing
+ /// elements.
+ ///
+ /// If this context is based on a findAll() call that resulted in
+ /// SUCCESS, it performs the same process for each RRset returned in
+ /// the \c findAll() call.
+ ///
+ /// The caller specifies desired RR types of the additional RRsets
+ /// in \c requested_types. Normally it consists of A and/or AAAA
+ /// types, but other types can be specified.
+ ///
+ /// This method is meaningful only when the precedent find()/findAll()
+ /// call resulted in SUCCESS or DELEGATION. Otherwise this method
+ /// does nothing.
+ ///
+ /// \note The additional RRsets returned via this method are limited to
+ /// ones contained in the zone which the corresponding find/findAll
+ /// call searched (possibly including glue records under a zone cut
+ /// where they are applicable). If the caller needs to get out-of-zone
+ /// additional RRsets, it needs to find them explicitly by
+ /// identifying the corresponding zone and calling \c find() on it.
+ ///
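+ /// For example, a caller that has just received a \c DELEGATION result
+ /// could collect the glue addresses as follows (a minimal sketch;
+ /// \c ctx is assumed to be the \c Context returned by the preceding
+ /// call):
+ /// \code
+ /// std::vector<isc::dns::RRType> types;
+ /// types.push_back(isc::dns::RRType::A());
+ /// types.push_back(isc::dns::RRType::AAAA());
+ /// std::vector<isc::dns::ConstRRsetPtr> additionals;
+ /// ctx->getAdditional(types, additionals);
+ /// // additionals now holds any A/AAAA RRsets found in the zone
+ /// \endcode
+ ///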
+ /// \param requested_types A vector of RR types for desired additional
+ /// RRsets.
+ /// \param result A vector to which any found additional RRsets are
+ /// to be inserted.
+ void getAdditional(
+ const std::vector<isc::dns::RRType>& requested_types,
+ std::vector<isc::dns::ConstRRsetPtr>& result)
+ {
+ // Perform common checks, and delegate the process to the default
+ // or specialized implementation.
+ if (code != SUCCESS && code != DELEGATION) {
+ return;
+ }
+
+ getAdditionalImpl(requested_types, result);
+ }
+
+ protected:
+ /// \brief Return the \c ZoneFinder that created this \c Context.
+ ///
+ /// A derived class implementation can return NULL if it defines
+ /// other protected methods that require a non NULL result from
+ /// this method. Otherwise it must return a valid, non NULL pointer
+ /// to the \c ZoneFinder object.
+ ///
+ /// When returning non NULL, the ownership of the pointed-to object
+ /// is not transferred to the caller; it cannot be assumed to be
+ /// valid after the originating \c Context object is destroyed.
+ /// Also, the caller must not try to delete the returned object.
+ virtual ZoneFinder* getFinder() = 0;
+
+ /// \brief Return a vector of RRsets corresponding to findAll() result.
+ ///
+ /// This method returns the set of RRsets corresponding to the
+ /// RRsets returned by a prior \c findAll() call.
+ ///
+ /// A derived class implementation can return NULL if it defines
+ /// other protected methods that require a non NULL result from
+ /// this method. Otherwise it must return a valid, non NULL pointer
+ /// to a vector that corresponds to the expected set of RRsets.
+ ///
+ /// When returning non NULL, the ownership of the pointed-to object
+ /// is not transferred to the caller; it cannot be assumed to be
+ /// valid after the originating \c Context object is destroyed.
+ /// Also, the caller must not try to delete the returned object.
+ virtual const std::vector<isc::dns::ConstRRsetPtr>*
+ getAllRRsets() const = 0;
+
+ /// \brief Actual implementation of getAdditional().
+ ///
+ /// This base class defines a default implementation that can be
+ /// used for any type of data sources. A data source implementation
+ /// can override it.
+ ///
+ /// The default version of this implementation requires both
+ /// \c getFinder() and \c getAllRRsets() return valid results.
+ virtual void getAdditionalImpl(
+ const std::vector<isc::dns::RRType>& requested_types,
+ std::vector<isc::dns::ConstRRsetPtr>& result);
+
+ private:
+ const FindResultFlags flags_;
+ protected:
+ const FindOptions options_;
+ };
+
+ /// \brief Generic ZoneFinder context that works for all implementations.
+ ///
+ /// This is a concrete derived class of \c ZoneFinder::Context that
+ /// only uses the generic (default) versions of the protected methods
+ /// and therefore works for any data source implementation.
+ ///
+ /// A data source implementation can use this class to create a
+ /// \c Context object as the return value of its \c find() or
+ /// \c findAll() method if it doesn't have to optimize specific
+ /// protected methods.
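+ ///
+ /// For example, a derived finder's \c find() implementation could build
+ /// its return value along these lines (a minimal sketch; \c result_code,
+ /// \c found_rrset and \c result_flags are assumed to have been computed
+ /// by the implementation):
+ /// \code
+ /// return (ZoneFinderContextPtr(
+ ///             new GenericContext(*this, options,
+ ///                                ResultContext(result_code, found_rrset,
+ ///                                              result_flags))));
+ /// \endcode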
+ class GenericContext : public Context {
+ public:
+ /// \brief The constructor for the normal find call.
+ ///
+ /// This constructor is expected to be called from the \c find()
+ /// method when it constructs the return value.
+ ///
+ /// \param finder The ZoneFinder on which find() is called.
+ /// \param options See the \c Context class.
+ /// \param result See the \c Context class.
+ GenericContext(ZoneFinder& finder, FindOptions options,
+ const ResultContext& result) :
+ Context(options, result), finder_(finder)
+ {}
+
+ /// \brief The constructor for the normal findAll call.
+ ///
+ /// This constructor is expected to be called from the \c findAll()
+ /// method when it constructs the return value.
+ ///
+ /// It copies the vector that is to be returned to the caller of
+ /// \c findAll() for possible subsequent use. Note that it cannot
+ /// simply hold a reference to the vector because the caller may
+ /// alter it after the \c findAll() call.
+ ///
+ /// \param finder The ZoneFinder on which findAll() is called.
+ /// \param options See the \c Context class.
+ /// \param result See the \c Context class.
+ /// \param all_set Reference to the vector given by the caller of
+ /// \c findAll(), storing the RRsets to be returned.
+ GenericContext(ZoneFinder& finder, FindOptions options,
+ const ResultContext& result,
+ const std::vector<isc::dns::ConstRRsetPtr>& all_set) :
+ Context(options, result), finder_(finder), all_set_(all_set)
+ {}
+
+ protected:
+ virtual ZoneFinder* getFinder() { return (&finder_); }
+ virtual const std::vector<isc::dns::ConstRRsetPtr>*
+ getAllRRsets() const {
+ return (&all_set_);
+ }
+
+ private:
+ ZoneFinder& finder_;
+ std::vector<isc::dns::ConstRRsetPtr> all_set_;
+ };
+
+ ///
+ /// \name Constructors and Destructor.
+ ///
+ //@{
+protected:
+ /// The default constructor.
+ ///
+ /// This is intentionally defined as \c protected as this base class should
+ /// never be instantiated (except as part of a derived class).
+ ZoneFinder() {}
+public:
+ /// The destructor.
+ virtual ~ZoneFinder() {}
+ //@}
+
+ ///
+ /// \name Getter Methods
+ ///
+ /// These methods should never throw an exception.
+ //@{
+ /// Return the origin name of the zone.
+ virtual isc::dns::Name getOrigin() const = 0;
+
+ /// Return the RR class of the zone.
+ virtual isc::dns::RRClass getClass() const = 0;
+ //@}
+
+ ///
+ /// \name Search Methods
+ ///
+ //@{
+ /// \brief Search the zone for a given pair of domain name and RR type.
+ ///
+ /// Each derived version of this method searches the underlying backend
+ /// for the data that best matches the given name and type.
+ /// This method is expected to be "intelligent", and identifies the
+ /// best possible answer for the search key. Specifically,
+ ///
+ /// - If the search name belongs under a zone cut, it returns the code
+ /// of \c DELEGATION and the NS RRset at the zone cut.
+ /// - If there is no matching name, it returns the code of \c NXDOMAIN.
+ /// - If there is a matching name but no RRset of the search type, it
+ /// returns the code of \c NXRRSET. This includes the case where the
+ /// search name matches an empty node of the zone.
+ /// - If there is a CNAME RR of the searched name but there is no
+ /// RR of the searched type of the name (so this type is different from
+ /// CNAME), it returns the code of \c CNAME and that CNAME RR.
+ /// Note that if the searched RR type is CNAME, it is considered
+ /// a successful match, and the code of \c SUCCESS will be returned.
+ /// - If the search name matches a delegation point of DNAME, it returns
+ /// the code of \c DNAME and that DNAME RR.
+ ///
+ /// No RRset will be returned in the \c NXDOMAIN and \c NXRRSET cases
+ /// (\c rrset member of \c FindContext will be NULL), unless DNSSEC data
+ /// are required. See below for the cases with DNSSEC.
+ ///
+ /// The returned \c FindContext object can also provide supplemental
+ /// information about the search result via its methods returning a
+ /// boolean value. Such information may be useful for the caller if
+ /// the caller wants to collect additional DNSSEC proofs based on the
+ /// search result.
+ ///
+ /// The \c options parameter specifies customized behavior of the search.
+ /// Their semantics are as follows (they can be combined with bitwise OR):
+ ///
+ /// - \c FIND_GLUE_OK Allow search under a zone cut. By default the search
+ /// will stop once it encounters a zone cut. If this option is specified
+ /// it remembers information about the highest zone cut and continues
+ /// the search until it finds an exact match for the given name or it
+ /// detects there is no exact match. If an exact match is found,
+ /// RRsets for that name are searched just like the normal case;
+ /// otherwise, if the search has encountered a zone cut, \c DELEGATION
+ /// with the information of the highest zone cut will be returned.
+ /// Note: the term "glue" in the DNS protocol standard may sometimes
+ /// cause confusion: some people use this term strictly for an address
+ /// record (type AAAA or A) for the name used in the RDATA of an NS RR;
+ /// some others seem to give it broader flexibility. Nevertheless,
+ /// in this API the "GLUE OK" simply means the search by find() can
+ /// continue beyond a zone cut; the derived class implementation does
+ /// not have to, and should not, check whether the type is an address
+ /// record or whether the query name is pointed to by some NS RR.
+ /// It's up to the caller with which definition of "glue" the search
+ /// result with this option should be used.
+ /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+ /// returned with the answer. It is allowed for the data source to
+ /// include them even when not requested.
+ /// - \c NO_WILDCARD Do not try wildcard matching. This option is of no
+ /// use for normal lookups; it's intended to be used to get a DNSSEC
+ /// proof of the non existence of any matching wildcard or non existence
+ /// of an exact match when a wildcard match is found.
+ ///
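+ /// For example, to get an NSEC that proves there is no matching wildcard
+ /// (the intended use of \c NO_WILDCARD above), a caller could do the
+ /// following (a minimal sketch; \c finder and \c qname are assumed to
+ /// be given):
+ /// \code
+ /// ZoneFinderContextPtr ctx =
+ ///     finder.find(qname, isc::dns::RRType::NSEC(),
+ ///                 ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+ /// if (ctx->code == ZoneFinder::NXDOMAIN && ctx->isNSECSigned()) {
+ ///     // ctx->rrset is an NSEC RRset that can be used as the proof
+ /// }
+ /// \endcode
+ ///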
+ /// In general, \c name is expected to be included in the zone, that is,
+ /// it should be equal to or a subdomain of the zone origin. Otherwise
+ /// this method will return \c NXDOMAIN with an empty RRset. But such a
+ /// case should rather be considered a caller's bug.
+ ///
+ /// \note For this reason it's probably better to throw an exception
+ /// than to return \c NXDOMAIN. This point should be revisited in a
+ /// future version. In any case, applications shouldn't call this method
+ /// for an out-of-zone name.
+ ///
+ /// <b>DNSSEC considerations:</b>
+ /// The result when DNSSEC data are required can be very complicated,
+ /// especially if it involves negative result or wildcard match.
+ /// Specifically, if an application calls this method for DNS query
+ /// processing with DNSSEC data, and if the search result code is
+ /// either \c NXDOMAIN or \c NXRRSET, and/or \c isWildcard() returns
+ /// true, then the application will need to find additional NSEC or
+ /// NSEC3 records for supplemental proofs. This method helps the
+ /// application for such post search processing.
+ ///
+ /// First, it tells the application whether the zone is signed with
+ /// NSEC or NSEC3 via the \c isNSEC(3)Signed() method. Any sanely signed
+ /// zone should be signed with either (and only one) of these two types
+ /// of RRs; however, the application should expect that the zone could
+ /// be broken and these methods could both return false. The
+ /// implementation should, however, ensure that they never both return true.
+ ///
+ /// In case it's signed with NSEC3, there is no further information
+ /// returned from this method.
+ ///
+ /// In case it's signed with NSEC, this method will possibly return
+ /// a related NSEC RRset in the \c rrset member of \c FindContext.
+ /// What kind of NSEC is returned depends on the result code
+ /// (\c NXDOMAIN or \c NXRRSET) and on whether it's a wildcard match:
+ ///
+ /// - In case of NXDOMAIN, the returned NSEC covers the queried domain
+ /// that proves that the query name does not exist in the zone. Note
+ /// that this does not necessarily prove it doesn't even match a
+ /// wildcard (even if the result of NXDOMAIN can only happen when
+ /// there's no matching wildcard either). It is the caller's
+ /// responsibility to provide a proof that there is no matching
+ /// wildcard if that proof is necessary.
+ /// - In case of NXRRSET, we need to consider the following cases
+ /// referring to Section 3.1.3 of RFC4035:
+ ///
+ /// -# (Normal) no data: there is a matching non-wildcard name with a
+ /// different RR type. This is the "No Data" case of the RFC.
+ /// -# (Normal) empty non terminal: there is no matching (exact or
+ /// wildcard) name, but there is a subdomain with an RR of the query
+ /// name. This is one case of "Name Error" of the RFC.
+ /// -# Wildcard empty non terminal: similar to case 2, but the empty name
+ /// is a wildcard, and matches the query name by wildcard expansion.
+ /// This is a special case of "Name Error" of the RFC.
+ /// -# Wildcard no data: there is no exact match name, but there is a
+ /// wildcard name that matches the query name with a different type
+ /// of RR. This is the "Wildcard No Data" case of the RFC.
+ ///
+ /// In case 1, \c find() returns NSEC of the matching name.
+ ///
+ /// In case 2, \c find() will return NSEC for the interval where the
+ /// empty nonterminal lives. The end of the interval is the subdomain
+ /// causing existence of the empty nonterminal (if there's
+ /// sub.x.example.com, and no record in x.example.com, then
+ /// x.example.com exists implicitly - is the empty nonterminal and
+ /// sub.x.example.com is the subdomain causing it). Note that this NSEC
+ /// proves not only the existence of empty non terminal name but also
+ /// the non existence of possibly matching wildcard name, because
+ /// there can be no better wildcard match than the exact matching empty
+ /// name.
+ ///
+ /// In case 3, \c find() will return NSEC for the interval where the
+ /// wildcard empty nonterminal lives. Cases 2 and 3 are especially
+ /// complicated and confusing. See the examples below.
+ ///
+ /// In case 4, \c find() will return NSEC of the matching wildcard name.
+ ///
+ /// Examples: if zone "example.com" has the following record:
+ /// \code
+ /// a.example.com. NSEC a.b.example.com.
+ /// \endcode
+ /// a call to \c find() for "b.example.com." with the FIND_DNSSEC option
+ /// will result in NXRRSET, and this NSEC will be returned.
+ /// Likewise, if zone "example.org" has the following record,
+ /// \code
+ /// a.example.org. NSEC x.*.b.example.org.
+ /// \endcode
+ /// a call to \c find() for "y.b.example.org" with FIND_DNSSEC will
+ /// result in NXRRSET and this NSEC; \c isWildcard() on the returned
+ /// \c FindContext object will return true.
+ ///
+ /// \exception std::bad_alloc Memory allocation such as for constructing
+ /// the resulting RRset fails
+ /// \throw OutOfZone The Name \c name is outside of the origin of the
+ /// zone of this ZoneFinder.
+ /// \exception DataSourceError Derived class specific exception, e.g.
+ /// when encountering a bad zone configuration or database connection
+ /// failure. Although these are considered rare, exceptional events,
+ /// they can happen under relatively usual conditions (unlike memory
+ /// allocation failure). So, in general, the application is expected
+ /// to catch this exception, either specifically or as a result of
+ /// catching a base exception class, and handle it gracefully.
+ ///
+ /// \param name The domain name to be searched for.
+ /// \param type The RR type to be searched for.
+ /// \param options The search options.
+ /// \return A \c FindContext object enclosing the search result
+ /// (see above).
+ virtual boost::shared_ptr<Context> find(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const FindOptions options
+ = FIND_DEFAULT) = 0;
+
+ /// \brief Search for an RRset of given RR type at the zone origin.
+ ///
+ /// In terms of API this method is equivalent to a call to \c find() where
+ /// the \c name parameter is the zone origin (the return value of
+ /// \c getOrigin()), and is in that sense redundant. It is provided as an
+ /// optimization point for some kind of finder implementations that can
+ /// exploit the fact that the query name is the zone origin and for
+ /// applications that want to possibly benefit from such implementations.
+ ///
+ /// If \c use_minttl is set to \c true and the returned context would
+ /// contain a non NULL RRset, its RR TTL is (possibly) adjusted so that
+ /// it's set to the minimum of its own TTL and the minimum TTL field value
+ /// of the zone's SOA record. If the RRset contains an RRSIG, its TTL
+ /// is also adjusted in the same way.
+ ///
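+ /// For example, a query handler building a negative response could
+ /// obtain the SOA with its TTL already capped like this (a minimal
+ /// sketch; \c finder is assumed to be the \c ZoneFinder of the zone):
+ /// \code
+ /// ZoneFinderContextPtr ctx =
+ ///     finder.findAtOrigin(isc::dns::RRType::SOA(), true,
+ ///                         ZoneFinder::FIND_DEFAULT);
+ /// if (ctx->code == ZoneFinder::SUCCESS && ctx->rrset) {
+ ///     // ctx->rrset is the SOA RRset; its TTL does not exceed the
+ ///     // value of the SOA minimum TTL field
+ /// }
+ /// \endcode
+ ///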
+ /// The origin of a zone is special in some points: for any valid zone
+ /// there should always be an SOA and at least one NS RR there, which
+ /// also means the origin name is never empty. Also, the SOA record can
+ /// be used in a DNS response for negative answers, in which case the
+ /// RR TTL must be set to the minimum of its own TTL and the value of the
+ /// minimum TTL field. Although these operations can be performed
+ /// through other public interfaces, they can sometimes be suboptimal
+ /// in performance, and a specialized implementation could be more
+ /// efficient. For example, a specific implementation of
+ /// \c getOrigin() could involve a dynamic creation of a \c Name object,
+ /// which is less efficient; on the other hand, the underlying finder
+ /// implementation may have an efficient way to access RRs of the origin
+ /// in an implementation specific way; and, while reconstructing an RRset
+ /// with a replaced TTL is relatively expensive, this can be done
+ /// much faster if the need for it is known beforehand.
+ ///
+ /// If the underlying finder implementation wants to optimize these cases,
+ /// it can do so by specializing the method. A default implementation
+ /// is provided, which should work for any finder implementation as long
+ /// as it conforms to the other public interfaces.
+ ///
+ /// So, an implementation of a finder does not have to care about this
+ /// method unless it sees the need for optimizing the behavior.
+ /// Also, applications normally do not have to use this interface;
+ /// using the generic \c find() method (with some post call processing)
+ /// can do everything this method can provide. The default implementation
+ /// may even be slower than such straightforward usage due to the
+ /// internal overhead. This method should be used if and only if the
+ /// application needs to achieve the possible best performance with an
+ /// optimized finder implementation.
+ ///
+ /// \param type The RR type to be searched for.
+ /// \param use_minttl Whether to adjust the TTL (see the description).
+ /// \param options The search options. Same for \c find().
+ ///
+ /// \return A \c FindContext object enclosing the search result.
+ /// See \c find().
+ virtual boost::shared_ptr<Context> findAtOrigin(
+ const isc::dns::RRType& type, bool use_minttl,
+ FindOptions options);
+
+public:
+ ///
+ /// \brief Finds all RRsets in the given name.
+ ///
+ /// This function works almost exactly the same way as \c find(). The
+ /// only difference is that, when the lookup is successful (i.e. the code
+ /// is SUCCESS), all the RRsets residing in the named node are
+ /// copied into the \c target parameter and the rrset member of the result
+ /// is NULL. All the other (unsuccessful) cases are handled the same,
+ /// including returning delegations, NSEC/NSEC3 availability and NSEC
+ /// proofs, wildcard information etc. The options parameter works the
+ /// same way and it should conform to the same exception restrictions.
+ ///
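+ /// For example (a minimal sketch; \c finder and \c qname are assumed
+ /// to be given):
+ /// \code
+ /// std::vector<isc::dns::ConstRRsetPtr> rrsets;
+ /// ZoneFinderContextPtr ctx = finder.findAll(qname, rrsets);
+ /// if (ctx->code == ZoneFinder::SUCCESS) {
+ ///     // rrsets now contains all RRsets of qname; ctx->rrset is NULL
+ /// }
+ /// \endcode
+ ///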
+ /// \param name \see find, parameter name
+ /// \param target the successful result is returned through this
+ /// \param options \see find, parameter options
+ /// \return \see find and its result
+ virtual boost::shared_ptr<Context> findAll(
+ const isc::dns::Name& name,
+ std::vector<isc::dns::ConstRRsetPtr> &target,
+ const FindOptions options = FIND_DEFAULT) = 0;
+
+ /// A helper structure to represent the search result of \c findNSEC3().
+ ///
+ /// The idea is similar to that of \c FindContext, but since \c findNSEC3()
+ /// has a special interface and semantics, we use a different structure to
+ /// represent the result.
+ struct FindNSEC3Result {
+ FindNSEC3Result(bool param_matched, uint8_t param_closest_labels,
+ isc::dns::ConstRRsetPtr param_closest_proof,
+ isc::dns::ConstRRsetPtr param_next_proof) :
+ matched(param_matched), closest_labels(param_closest_labels),
+ closest_proof(param_closest_proof),
+ next_proof(param_next_proof)
+ {}
+
+ /// true iff closest_proof is a matching NSEC3
+ const bool matched;
+
+ /// The number of labels of the identified closest encloser.
+ const uint8_t closest_labels;
+
+ /// Either the NSEC3 for the closest provable encloser of the given
+ /// name or NSEC3 that covers the name
+ const isc::dns::ConstRRsetPtr closest_proof;
+
+ /// When non NULL, NSEC3 for the next closer name.
+ const isc::dns::ConstRRsetPtr next_proof;
+ };
+
+ /// Search the zone for the NSEC3 RR(s) that prove the existence or non
+ /// existence of a given name.
+ ///
+ /// It searches the NSEC3 namespace of the zone (how that namespace is
+ /// implemented can vary in specific data source implementation) for NSEC3
+ /// RRs that match or cover the NSEC3 hash value for the given name.
+ ///
+ /// If \c recursive is false, it will first look for the NSEC3 that has
+ /// a matching hash. If it doesn't exist, it identifies the covering NSEC3
+ /// for the hash. In either case the search stops at that point and the
+ /// found NSEC3 RR(set) will be returned in the closest_proof member of
+ /// \c FindNSEC3Result. \c matched is true or false depending on
+ /// whether the found NSEC3 is a matching one or a covering one.
+ /// \c next_proof is always NULL. closest_labels will be equal to the
+ /// number of labels of \c name (and is therefore meaningless).
+ ///
+ /// If \c recursive is true, it will continue the search toward the zone
+ /// apex (origin name) until it finds a provable encloser, that is,
+ /// an ancestor of \c name that has a matching NSEC3. This is the closest
+ /// provable encloser of \c name as defined in RFC5155. In this case,
+ /// if the found encloser is not equal to \c name, the search should
+ /// have seen a covering NSEC3 for the immediate child of the found
+ /// encloser. That child name is the next closer name as defined in
+ /// RFC5155. In this case, this method returns the NSEC3 for the
+ /// closest encloser in \c closest_proof, and the NSEC3 for the next
+ /// closer name in \c next_proof of \c FindNSEC3Result. This set of
+ /// NSEC3 RRs provides the closest encloser proof as defined in RFC5155.
+ /// closest_labels will be set to the number of labels of the identified
+ /// closest encloser. This will be useful when the caller needs to
+ /// construct the closest encloser name from the original \c name.
+ /// If, on the other hand, the found closest name is equal to \c name,
+ /// this method simply returns it in \c closest_proof. \c next_proof
+ /// is set to NULL. In all cases \c matched is set to true.
+ /// closest_labels will be set to the number of labels of \c name.
+ ///
+ /// When looking for NSEC3, this method retrieves NSEC3 parameters from
+ /// the corresponding zone to calculate hash values. Actual implementation
+ /// of how to do this will differ in different data sources. If the
+ /// NSEC3 parameters are not available \c DataSourceError exception
+ /// will be thrown.
+ ///
+ /// \note This implicitly means this method assumes the zone does not
+ /// have more than one set of parameters. This assumption should be
+ /// reasonable in actual deployment and will help simplify the interface
+ /// and implementation. But if there's a real need for supporting
+ /// multiple sets of parameters in a single zone, we will have to
+ /// extend this method so that, e.g., the caller can specify the parameter
+ /// set.
+ ///
+ /// In general, this method expects the zone is properly signed with NSEC3
+ /// RRs. Specifically, it assumes at least the apex node has a matching
+ /// NSEC3 RR (so the search in the recursive mode must always succeed);
+ /// it also assumes that it can retrieve NSEC3 parameters (iterations,
+ /// algorithm, and salt) from the zone as noted above. If these
+ /// assumptions aren't met, \c DataSourceError exception will be thrown.
+ ///
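+ /// For example, a caller that needs a closest encloser proof could use
+ /// the recursive mode as follows (a minimal sketch; \c finder and
+ /// \c qname are assumed to be given):
+ /// \code
+ /// ZoneFinder::FindNSEC3Result result = finder.findNSEC3(qname, true);
+ /// // result.closest_proof matches the closest provable encloser;
+ /// // result.closest_labels is the label count of that encloser.
+ /// if (result.next_proof) {
+ ///     // result.next_proof covers the next closer name
+ /// }
+ /// \endcode
+ ///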
+ /// \exception OutOfZone name is not a subdomain of the zone origin
+ /// \exception DataSourceError Low-level or internal datasource errors
+ /// happened, or the zone isn't properly signed with NSEC3
+ /// (NSEC3 parameters cannot be found, no NSEC3s are available, etc).
+ /// \exception std::bad_alloc The underlying implementation involves
+ /// memory allocation and it fails
+ ///
+ /// \param name The name for which NSEC3 RRs are to be found. It must
+ /// be a subdomain of the zone.
+ /// \param recursive Whether or not search should continue until it finds
+ /// a provable encloser (see above).
+ ///
+ /// \return The search result and whether or not the closest_proof is
+ /// a matching NSEC3, in the form of \c FindNSEC3Result object.
+ virtual FindNSEC3Result
+ findNSEC3(const isc::dns::Name& name, bool recursive) = 0;
+ //@}
+};
+
+/// \brief Operator to combine FindOptions
+///
+/// We would need to manually static-cast the options if we put or
+/// between them, which is undesired with bit-flag options. Therefore
+/// we hide the cast here, which is the simplest solution and it still
+/// provides reasonable level of type safety.
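+///
+/// For example, two options can be combined directly:
+/// \code
+/// ZoneFinder::FindOptions opts =
+///     ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD;
+/// \endcode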
+inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
+ ZoneFinder::FindOptions b)
+{
+ return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
+ static_cast<unsigned>(b)));
+}
+
+/// \brief Operator to combine FindResultFlags
+///
+/// Similar to the same operator for \c FindOptions. Refer to the description
+/// of that function.
+inline ZoneFinder::FindResultFlags operator |(
+ ZoneFinder::FindResultFlags a,
+ ZoneFinder::FindResultFlags b)
+{
+ return (static_cast<ZoneFinder::FindResultFlags>(
+ static_cast<unsigned>(a) | static_cast<unsigned>(b)));
+}
+
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
+
+/// \brief A pointer-like type pointing to an immutable \c ZoneFinder object.
+typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
+
+/// \brief A pointer-like type pointing to a \c ZoneFinder::Context object.
+typedef boost::shared_ptr<ZoneFinder::Context> ZoneFinderContextPtr;
+
+/// \brief A pointer-like type pointing to an immutable
+/// \c ZoneFinder::Context object.
+typedef boost::shared_ptr<const ZoneFinder::Context> ConstZoneFinderContextPtr;
+
+} // end of datasrc
+} // end of isc
+
+#endif // DATASRC_ZONE_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/zone_finder_context.cc b/src/lib/datasrc/zone_finder_context.cc
index 482eb65..8d7edf0 100644
--- a/src/lib/datasrc/zone_finder_context.cc
+++ b/src/lib/datasrc/zone_finder_context.cc
@@ -19,7 +19,7 @@
#include <dns/rrtype.h>
#include <dns/rdataclass.h>
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <boost/foreach.hpp>
diff --git a/src/lib/datasrc/zone_iterator.h b/src/lib/datasrc/zone_iterator.h
new file mode 100644
index 0000000..e1c6929
--- /dev/null
+++ b/src/lib/datasrc/zone_iterator.h
@@ -0,0 +1,105 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DATASRC_ZONE_ITERATOR_H
+#define DATASRC_ZONE_ITERATOR_H 1
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (a descendant of) ZoneIterator from the
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There is no way to start iterating from the beginning again or to go back.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+ /**
+ * \brief Destructor
+ *
+ * Virtual destructor. It is empty, but ensures that the right destructor
+ * of the descendant class is called.
+ */
+ virtual ~ZoneIterator() { }
+
+ /**
+ * \brief Get next RRset from the zone.
+ *
+ * This returns the next RRset in the zone as a shared pointer. The
+ * shared pointer is used to allow both accessing in-memory data and
+ * automatic memory management.
+ *
+ * No particular order of the RRsets is guaranteed.
+ *
+ * While this can potentially throw anything (including standard allocation
+ * errors), it should be rare.
+ *
+ * \return Pointer to the next RRset or NULL pointer when the iteration
+ * gets to the end of the zone.
+ */
+ virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+
+ /**
+ * \brief Return the SOA record of the zone in the iterator context.
+ *
+ * This method returns the zone's SOA record (if any, and a valid zone
+ * should have it) in the form of an RRset object. This SOA is identical
+ * to that (again, if any) contained in the sequence of RRsets returned
+ * by the iterator. In that sense this method is redundant, but is
+ * provided as a convenient utility for the application of the
+ * iterator; the application may need to know the SOA serial or the
+ * SOA RR itself for the purpose of protocol handling or skipping the
+ * expensive iteration processing.
+ *
+ * If the zone doesn't have an SOA (which is broken, but some data source
+ * may allow that situation), this method returns NULL. Also, in the
+ * normal and valid case, the SOA should have exactly one RDATA, but
+ * this API does not guarantee it as some data source may accept such an
+ * abnormal condition. It's up to the caller whether to check the number
+ * of RDATA and how to react to the unexpected case.
+ *
+ * Each concrete derived method must ensure that the SOA returned by this
+ * method is identical to the zone's SOA returned via the iteration.
+ * For example, even if another thread or process updates the SOA while
+ * the iterator is working, the result of this method must not be
+ * affected by the update. For database based data sources, this can
+ * be done by making the entire iterator operation as a single database
+ * transaction, but the actual implementation can differ.
+ *
+ * \exception None
+ *
+ * \return A shared pointer to an SOA RRset that would be returned
+ * from the iteration. It will be NULL if the zone doesn't have an SOA.
+ */
+ virtual isc::dns::ConstRRsetPtr getSOA() const = 0;
+};
+
+} // end of datasrc
+} // end of isc
+#endif // DATASRC_ZONE_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
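
A short usage sketch of the iterator interface moved into this header (not part of the patch; it assumes a DataSourceClient obtained elsewhere and the ZoneIteratorPtr typedef from client.h):

    #include <datasrc/client.h>
    #include <datasrc/zone_iterator.h>
    #include <dns/name.h>

    using namespace isc::datasrc;
    using namespace isc::dns;

    void dumpZone(DataSourceClient& client) {
        ZoneIteratorPtr it = client.getIterator(Name("example.org"));

        // The SOA is available without iterating; it may be NULL for a
        // broken zone that lacks one.
        ConstRRsetPtr soa = it->getSOA();
        (void)soa;   // silence unused-variable warnings in this sketch

        // getNextRRset() returns NULL once the end of the zone is reached.
        for (ConstRRsetPtr rrset = it->getNextRRset(); rrset;
             rrset = it->getNextRRset()) {
            // process rrset here
        }
    }
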
diff --git a/src/lib/datasrc/zone_loader.cc b/src/lib/datasrc/zone_loader.cc
index 9e9dd4a..d0f4a64 100644
--- a/src/lib/datasrc/zone_loader.cc
+++ b/src/lib/datasrc/zone_loader.cc
@@ -17,15 +17,15 @@
#include <datasrc/client.h>
#include <datasrc/data_source.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/zone.h>
#include <datasrc/logger.h>
-#include <datasrc/rrset_collection_base.h>
#include <dns/rrset.h>
#include <dns/zone_checker.h>
#include <dns/name.h>
#include <dns/rrclass.h>
+#include <dns/rrset_collection_base.h>
#include <boost/bind.hpp>
@@ -33,6 +33,7 @@
using isc::dns::Name;
using isc::dns::ConstRRsetPtr;
+using isc::dns::RRsetCollectionBase;
using isc::dns::MasterLoader;
using isc::dns::MasterLexer;
diff --git a/src/lib/datasrc/zonetable.h b/src/lib/datasrc/zonetable.h
index 911391c..ef8a33b 100644
--- a/src/lib/datasrc/zonetable.h
+++ b/src/lib/datasrc/zonetable.h
@@ -19,7 +19,7 @@
#include <dns/rrset.h>
-#include <datasrc/zone.h>
+#include <datasrc/zone_finder.h>
#include <boost/shared_ptr.hpp>
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 822c4e3..f169fe6 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -32,6 +32,7 @@ libb10_dhcp___la_SOURCES += option.cc option.h
libb10_dhcp___la_SOURCES += option_custom.cc option_custom.h
libb10_dhcp___la_SOURCES += option_data_types.cc option_data_types.h
libb10_dhcp___la_SOURCES += option_definition.cc option_definition.h
+libb10_dhcp___la_SOURCES += option_space.cc option_space.h
libb10_dhcp___la_SOURCES += pkt6.cc pkt6.h
libb10_dhcp___la_SOURCES += pkt4.cc pkt4.h
libb10_dhcp___la_SOURCES += std_option_defs.h
diff --git a/src/lib/dhcp/libdhcp++.cc b/src/lib/dhcp/libdhcp++.cc
index a3921cc..78f511d 100644
--- a/src/lib/dhcp/libdhcp++.cc
+++ b/src/lib/dhcp/libdhcp++.cc
@@ -267,20 +267,12 @@ size_t LibDHCP::unpackOptions4(const OptionBuffer& buf,
return (offset);
}
-void LibDHCP::packOptions6(isc::util::OutputBuffer &buf,
- const isc::dhcp::Option::OptionCollection& options) {
- for (Option::OptionCollection::const_iterator it = options.begin();
- it != options.end(); ++it) {
- it->second->pack(buf);
- }
-}
-
void
LibDHCP::packOptions(isc::util::OutputBuffer& buf,
const Option::OptionCollection& options) {
for (Option::OptionCollection::const_iterator it = options.begin();
it != options.end(); ++it) {
- it->second->pack4(buf);
+ it->second->pack(buf);
}
}
@@ -329,10 +321,35 @@ LibDHCP::initStdOptionDefs4() {
// Now let's add all option definitions.
for (int i = 0; i < OPTION_DEF_PARAMS_SIZE4; ++i) {
- OptionDefinitionPtr definition(new OptionDefinition(OPTION_DEF_PARAMS4[i].name,
- OPTION_DEF_PARAMS4[i].code,
- OPTION_DEF_PARAMS4[i].type,
- OPTION_DEF_PARAMS4[i].array));
+ std::string encapsulates(OPTION_DEF_PARAMS4[i].encapsulates);
+ if (!encapsulates.empty() && OPTION_DEF_PARAMS4[i].array) {
+ isc_throw(isc::BadValue, "invalid standard option definition: "
+ << "option with code '" << OPTION_DEF_PARAMS4[i].code
+ << "' may not encapsulate option space '"
+ << encapsulates << "' because the definition"
+ << " indicates that this option comprises an array"
+ << " of values");
+ }
+
+ // Depending on whether the option encapsulates an option space or not,
+ // we pick a different constructor to create an instance of the option
+ // definition.
+ OptionDefinitionPtr definition;
+ if (encapsulates.empty()) {
+ // Option does not encapsulate any option space.
+ definition.reset(new OptionDefinition(OPTION_DEF_PARAMS4[i].name,
+ OPTION_DEF_PARAMS4[i].code,
+ OPTION_DEF_PARAMS4[i].type,
+ OPTION_DEF_PARAMS4[i].array));
+
+ } else {
+ // Option does encapsulate an option space.
+ definition.reset(new OptionDefinition(OPTION_DEF_PARAMS4[i].name,
+ OPTION_DEF_PARAMS4[i].code,
+ OPTION_DEF_PARAMS4[i].type,
+ OPTION_DEF_PARAMS4[i].encapsulates));
+
+ }
for (int rec = 0; rec < OPTION_DEF_PARAMS4[i].records_size; ++rec) {
definition->addRecordField(OPTION_DEF_PARAMS4[i].records[rec]);
@@ -358,10 +375,34 @@ LibDHCP::initStdOptionDefs6() {
v6option_defs_.clear();
for (int i = 0; i < OPTION_DEF_PARAMS_SIZE6; ++i) {
- OptionDefinitionPtr definition(new OptionDefinition(OPTION_DEF_PARAMS6[i].name,
- OPTION_DEF_PARAMS6[i].code,
- OPTION_DEF_PARAMS6[i].type,
- OPTION_DEF_PARAMS6[i].array));
+ std::string encapsulates(OPTION_DEF_PARAMS6[i].encapsulates);
+ if (!encapsulates.empty() && OPTION_DEF_PARAMS6[i].array) {
+ isc_throw(isc::BadValue, "invalid standard option definition: "
+ << "option with code '" << OPTION_DEF_PARAMS6[i].code
+ << "' may not encapsulate option space '"
+ << encapsulates << "' because the definition"
+ << " indicates that this option comprises an array"
+ << " of values");
+ }
+
+ // Depending on whether an option encapsulates an option space or not,
+ // we pick a different constructor to create an instance of the option
+ // definition.
+ OptionDefinitionPtr definition;
+ if (encapsulates.empty()) {
+ // Option does not encapsulate any option space.
+ definition.reset(new OptionDefinition(OPTION_DEF_PARAMS6[i].name,
+ OPTION_DEF_PARAMS6[i].code,
+ OPTION_DEF_PARAMS6[i].type,
+ OPTION_DEF_PARAMS6[i].array));
+ } else {
+ // Option does encapsulate an option space.
+ definition.reset(new OptionDefinition(OPTION_DEF_PARAMS6[i].name,
+ OPTION_DEF_PARAMS6[i].code,
+ OPTION_DEF_PARAMS6[i].type,
+ OPTION_DEF_PARAMS6[i].encapsulates));
+
+ }
for (int rec = 0; rec < OPTION_DEF_PARAMS6[i].records_size; ++rec) {
definition->addRecordField(OPTION_DEF_PARAMS6[i].records[rec]);
diff --git a/src/lib/dhcp/libdhcp++.h b/src/lib/dhcp/libdhcp++.h
index bc47405..c6594b9 100644
--- a/src/lib/dhcp/libdhcp++.h
+++ b/src/lib/dhcp/libdhcp++.h
@@ -88,16 +88,6 @@ public:
uint16_t type,
const OptionBuffer& buf);
- /// Builds collection of options.
- ///
- /// Builds raw (on-wire) data for provided collection of options.
- ///
- /// @param buf output buffer (assembled options will be stored here)
- /// @param options collection of options to store to
- static void packOptions6(isc::util::OutputBuffer& buf,
- const isc::dhcp::Option::OptionCollection& options);
-
-
/// @brief Stores options in a buffer.
///
/// Stores all options defined in options containers in an on-wire
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index dbdac0c..e06b163 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -84,51 +84,14 @@ Option::check() {
}
void Option::pack(isc::util::OutputBuffer& buf) {
- switch (universe_) {
- case V6:
- return (pack6(buf));
-
- case V4:
- return (pack4(buf));
-
- default:
- isc_throw(BadValue, "Failed to pack " << type_ << " option as the "
- << "universe type is unknown.");
+ // Write a header.
+ packHeader(buf);
+ // Write data.
+ if (!data_.empty()) {
+ buf.writeData(&data_[0], data_.size());
}
-}
-
-void
-Option::pack4(isc::util::OutputBuffer& buf) {
- if (universe_ == V4) {
- // Write a header.
- packHeader(buf);
- // Write data.
- if (!data_.empty()) {
- buf.writeData(&data_[0], data_.size());
- }
- // Write sub-options.
- packOptions(buf);
- } else {
- isc_throw(BadValue, "Invalid universe type " << universe_);
- }
-
- return;
-}
-
-void Option::pack6(isc::util::OutputBuffer& buf) {
- if (universe_ == V6) {
- // Write a header.
- packHeader(buf);
- // Write data.
- if (!data_.empty()) {
- buf.writeData(&data_[0], data_.size());
- }
- // Write sub-options.
- packOptions(buf);
- } else {
- isc_throw(BadValue, "Invalid universe type " << universe_);
- }
- return;
+ // Write sub-options.
+ packOptions(buf);
}
void
@@ -153,16 +116,7 @@ Option::packHeader(isc::util::OutputBuffer& buf) {
void
Option::packOptions(isc::util::OutputBuffer& buf) {
- switch (universe_) {
- case V4:
- LibDHCP::packOptions(buf, options_);
- return;
- case V6:
- LibDHCP::packOptions6(buf, options_);
- return;
- default:
- isc_throw(isc::BadValue, "Invalid universe type " << universe_);
- }
+ LibDHCP::packOptions(buf, options_);
}
void Option::unpack(OptionBufferConstIter begin,
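
With pack4()/pack6() folded into pack(), calling code no longer needs to care about the universe. A minimal sketch of the resulting call pattern (not part of the patch; it assumes the existing Option(Universe, type, OptionBuffer) constructor and isc::util::OutputBuffer, and the option codes are arbitrary):

    #include <dhcp/option.h>
    #include <util/buffer.h>

    using namespace isc::dhcp;

    void packBothUniverses() {
        OptionBuffer payload(4, 0x01);            // four bytes of illustrative data

        Option v4opt(Option::V4, 12, payload);    // arbitrary DHCPv4 option code
        Option v6opt(Option::V6, 23, payload);    // arbitrary DHCPv6 option code

        isc::util::OutputBuffer buf(64);
        v4opt.pack(buf);   // 1-byte code and length header, then data, then sub-options
        v6opt.pack(buf);   // 2-byte code and length header, then data, then sub-options
    }
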
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index e4105cc..553e825 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -158,28 +158,13 @@ public:
///
/// Writes option in wire-format to buffer, returns pointer to first unused
/// byte after stored option (that is useful for writing options one after
- /// another). Used in DHCPv6 options.
- ///
- /// @todo Migrate DHCPv6 code to pack(OutputBuffer& buf) version
+ /// another).
///
/// @param buf pointer to a buffer
///
/// @throw BadValue Universe of the option is neither V4 nor V6.
virtual void pack(isc::util::OutputBuffer& buf);
- /// @brief Writes option in a wire-format to a buffer.
- ///
- /// Method will throw if option storing fails for some reason.
- ///
- /// @todo Once old (DHCPv6) implementation is rewritten,
- /// unify pack4() and pack6() and rename them to just pack().
- ///
- /// @param buf output buffer (option will be stored there)
- ///
- /// @throw OutOfRange Option type is greater than 255.
- /// @throw BadValue Universe is not V4.
- virtual void pack4(isc::util::OutputBuffer& buf);
-
/// @brief Parses received buffer.
///
/// @param begin iterator to first byte of option data
@@ -317,13 +302,6 @@ public:
virtual bool equal(const OptionPtr& other) const;
protected:
- /// Builds raw (over-wire) buffer of this option, including all
- /// defined suboptions. Version for building DHCPv4 options.
- ///
- /// @param buf output buffer (built options will be stored here)
- ///
- /// @throw BadValue Universe is not V6.
- virtual void pack6(isc::util::OutputBuffer& buf);
/// @brief Store option's header in a buffer.
///
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
index 86da9f6..436d07d 100644
--- a/src/lib/dhcp/option4_addrlst.cc
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -64,7 +64,7 @@ Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
}
void
-Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+Option4AddrLst::pack(isc::util::OutputBuffer& buf) {
if (addrs_.size() * V4ADDRESS_LEN > 255) {
isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big."
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
index 927f75b..21bbbe1 100644
--- a/src/lib/dhcp/option4_addrlst.h
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -29,6 +29,11 @@
namespace isc {
namespace dhcp {
+/// Forward declaration of the Option4AddrLst class.
+class Option4AddrLst;
+
+/// A pointer to the Option4AddrLst object.
+typedef boost::shared_ptr<Option4AddrLst> Option4AddrLstPtr;
/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
///
@@ -87,11 +92,8 @@ public:
///
/// Method will throw if option storing fails for some reason.
///
- /// TODO Once old (DHCPv6) implementation is rewritten,
- /// unify pack4() and pack6() and rename them to just pack().
- ///
/// @param buf output buffer (option will be stored there)
- virtual void pack4(isc::util::OutputBuffer& buf);
+ virtual void pack(isc::util::OutputBuffer& buf);
/// Returns string representation of the option.
///
diff --git a/src/lib/dhcp/option_custom.cc b/src/lib/dhcp/option_custom.cc
index 068e360..3d2a1a9 100644
--- a/src/lib/dhcp/option_custom.cc
+++ b/src/lib/dhcp/option_custom.cc
@@ -387,14 +387,10 @@ OptionCustom::dataFieldToText(const OptionDataType data_type,
}
void
-OptionCustom::pack4(isc::util::OutputBuffer& buf) {
- if (len() > 255) {
- isc_throw(OutOfRange, "DHCPv4 Option " << type_
- << " value is too high. At most 255 is supported.");
- }
+OptionCustom::pack(isc::util::OutputBuffer& buf) {
- buf.writeUint8(type_);
- buf.writeUint8(len() - getHeaderLen());
+ // Pack DHCP header (V4 or V6).
+ packHeader(buf);
// Write data from buffers.
for (std::vector<OptionBuffer>::const_iterator it = buffers_.begin();
@@ -411,21 +407,6 @@ OptionCustom::pack4(isc::util::OutputBuffer& buf) {
packOptions(buf);
}
-void
-OptionCustom::pack6(isc::util::OutputBuffer& buf) {
- buf.writeUint16(type_);
- buf.writeUint16(len() - getHeaderLen());
-
- // Write data from buffers.
- for (std::vector<OptionBuffer>::const_iterator it = buffers_.begin();
- it != buffers_.end(); ++it) {
- if (!it->empty()) {
- buf.writeData(&(*it)[0], it->size());
- }
- }
-
- packOptions(buf);
-}
asiolink::IOAddress
OptionCustom::readAddress(const uint32_t index) const {
diff --git a/src/lib/dhcp/option_custom.h b/src/lib/dhcp/option_custom.h
index 0ee4688..c25347b 100644
--- a/src/lib/dhcp/option_custom.h
+++ b/src/lib/dhcp/option_custom.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -249,6 +249,11 @@ public:
void writeString(const std::string& text,
const uint32_t index = 0);
+ /// @brief Writes DHCP option in a wire format to a buffer.
+ ///
+ /// @param buf output buffer (option will be stored there).
+ virtual void pack(isc::util::OutputBuffer& buf);
+
/// @brief Parses received buffer.
///
/// @param begin iterator to first byte of option data
@@ -278,18 +283,6 @@ public:
void setData(const OptionBufferConstIter first,
const OptionBufferConstIter last);
-protected:
-
- /// @brief Writes DHCPv4 option in a wire format to a buffer.
- ///
- /// @param buf output buffer (option will be stored there).
- virtual void pack4(isc::util::OutputBuffer& buf);
-
- /// @brief Writes DHCPv6 option in a wire format to a buffer.
- ///
- /// @param buf output buffer (built options will be stored here)
- virtual void pack6(isc::util::OutputBuffer& buf);
-
private:
/// @brief Verify that the option comprises an array of values.
@@ -352,6 +345,9 @@ private:
std::vector<OptionBuffer> buffers_;
};
+/// A pointer to the OptionCustom object.
+typedef boost::shared_ptr<OptionCustom> OptionCustomPtr;
+
} // namespace isc::dhcp
} // namespace isc
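
The new OptionCustomPtr typedef follows the pattern used elsewhere in libdhcp++; a brief sketch (not part of the patch) of the usual downcast from a generic OptionPtr:

    #include <dhcp/option_custom.h>
    #include <boost/shared_ptr.hpp>

    using namespace isc::dhcp;

    OptionCustomPtr asCustom(const OptionPtr& generic) {
        // Non-NULL only when the underlying object really is an OptionCustom.
        return (boost::dynamic_pointer_cast<OptionCustom>(generic));
    }
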
diff --git a/src/lib/dhcp/option_definition.cc b/src/lib/dhcp/option_definition.cc
index d2b5aae..59ff022 100644
--- a/src/lib/dhcp/option_definition.cc
+++ b/src/lib/dhcp/option_definition.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -21,6 +21,7 @@
#include <dhcp/option_definition.h>
#include <dhcp/option_int.h>
#include <dhcp/option_int_array.h>
+#include <dhcp/option_space.h>
#include <util/encode/hex.h>
#include <util/strutil.h>
#include <boost/algorithm/string/classification.hpp>
@@ -40,7 +41,8 @@ OptionDefinition::OptionDefinition(const std::string& name,
: name_(name),
code_(code),
type_(OPT_UNKNOWN_TYPE),
- array_type_(array_type) {
+ array_type_(array_type),
+ encapsulated_space_("") {
// Data type is held as enum value by this class.
// Use the provided option type string to get the
// corresponding enum value.
@@ -54,7 +56,33 @@ OptionDefinition::OptionDefinition(const std::string& name,
: name_(name),
code_(code),
type_(type),
- array_type_(array_type) {
+ array_type_(array_type),
+ encapsulated_space_("") {
+}
+
+OptionDefinition::OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const std::string& type,
+ const char* encapsulated_space)
+ : name_(name),
+ code_(code),
+ // Data type is held as enum value by this class.
+ // Use the provided option type string to get the
+ // corresponding enum value.
+ type_(OptionDataTypeUtil::getDataType(type)),
+ array_type_(false),
+ encapsulated_space_(encapsulated_space) {
+}
+
+OptionDefinition::OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const OptionDataType type,
+ const char* encapsulated_space)
+ : name_(name),
+ code_(code),
+ type_(type),
+ array_type_(false),
+ encapsulated_space_(encapsulated_space) {
}
void
@@ -188,8 +216,8 @@ OptionDefinition::optionFactory(Option::Universe u, uint16_t type,
const RecordFieldsCollection& records = getRecordFields();
if (records.size() > values.size()) {
isc_throw(InvalidOptionValue, "number of data fields for the option"
- << " type " << type_ << " is greater than number of values"
- << " provided.");
+ << " type '" << getCode() << "' is greater than number"
+ << " of values provided.");
}
for (size_t i = 0; i < records.size(); ++i) {
writeToBuffer(util::str::trim(values[i]),
@@ -228,6 +256,11 @@ OptionDefinition::validate() const {
all(find_tail(name_, 1), boost::is_any_of(std::string("-_")))) {
err_str << "invalid option name '" << name_ << "'";
+ } else if (!encapsulated_space_.empty() &&
+ !OptionSpace::validateName(encapsulated_space_)) {
+ err_str << "invalid encapsulated option space name: '"
+ << encapsulated_space_ << "'";
+
} else if (type_ >= OPT_UNKNOWN_TYPE) {
// Option definition must be of a known type.
err_str << "option type value " << type_ << " is out of range.";
@@ -411,14 +444,8 @@ OptionDefinition::writeToBuffer(const std::string& value,
OptionDataTypeUtil::writeString(value, buf);
return;
case OPT_FQDN_TYPE:
- {
- // FQDN implementation is not terribly complicated but will require
- // creation of some additional logic (maybe object) that will parse
- // the fqdn into labels.
- isc_throw(isc::NotImplemented, "write of FQDN record into option buffer"
- " is not supported yet");
- return;
- }
+ OptionDataTypeUtil::writeFqdn(value, buf);
+ return;
default:
// We hit this point because invalid option data type has been specified
// This may be the case because 'empty' or 'record' data type has been
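
The FQDN branch above now delegates to OptionDataTypeUtil::writeFqdn. A tiny sketch of that call in isolation (not part of the patch; the two-argument signature is taken from its use in writeToBuffer above, and the domain name is an example value):

    #include <dhcp/option.h>              // OptionBuffer
    #include <dhcp/option_data_types.h>   // OptionDataTypeUtil

    using namespace isc::dhcp;

    OptionBuffer encodeDomainName() {
        OptionBuffer buf;
        // The textual name is appended as wire-format, length-prefixed labels.
        OptionDataTypeUtil::writeFqdn("mydomain.example.com.", buf);
        return (buf);
    }
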
diff --git a/src/lib/dhcp/option_definition.h b/src/lib/dhcp/option_definition.h
index efcaba0..df5def7 100644
--- a/src/lib/dhcp/option_definition.h
+++ b/src/lib/dhcp/option_definition.h
@@ -146,10 +146,10 @@ public:
/// @param type option data type as string.
/// @param array_type array indicator, if true it indicates that the
/// option fields are the array.
- OptionDefinition(const std::string& name,
- const uint16_t code,
- const std::string& type,
- const bool array_type = false);
+ explicit OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const std::string& type,
+ const bool array_type = false);
/// @brief Constructor.
///
@@ -158,10 +158,49 @@ public:
/// @param type option data type.
/// @param array_type array indicator, if true it indicates that the
/// option fields are the array.
- OptionDefinition(const std::string& name,
- const uint16_t code,
- const OptionDataType type,
- const bool array_type = false);
+ explicit OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const OptionDataType type,
+ const bool array_type = false);
+
+ /// @brief Constructor.
+ ///
+ /// This constructor sets the name of the option space that is
+ /// encapsulated by this option. The encapsulated option space
+ /// identifies sub-options that are carried within this option.
+ /// This constructor does not allow setting the array indicator
+ /// because options comprising an array of data fields must
+ /// not be used with sub-options.
+ ///
+ /// @param name option name.
+ /// @param code option code.
+ /// @param type option data type given as string.
+ /// @param encapsulated_space name of the option space being
+ /// encapsulated by this option.
+ explicit OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const std::string& type,
+ const char* encapsulated_space);
+
+ /// @brief Constructor.
+ ///
+ /// This constructor sets the name of the option space that is
+ /// encapsulated by this option. The encapsulated option space
+ /// identifies sub-options that are carried within this option.
+ /// This constructor does not allow setting the array indicator
+ /// because options comprising an array of data fields must
+ /// not be used with sub-options.
+ ///
+ /// @param name option name.
+ /// @param code option code.
+ /// @param type option data type.
+ /// @param encapsulated_space name of the option space being
+ /// encapsulated by this option.
+ explicit OptionDefinition(const std::string& name,
+ const uint16_t code,
+ const OptionDataType type,
+ const char* encapsulated_space);
+
/// @brief Adds data field to the record.
///
@@ -192,10 +231,17 @@ public:
/// @return option code.
uint16_t getCode() const { return (code_); }
+ /// @brief Return name of the encapsulated option space.
+ ///
+ /// @return name of the encapsulated option space.
+ std::string getEncapsulatedSpace() const {
+ return (encapsulated_space_);
+ }
+
/// @brief Return option name.
///
/// @return option name.
- const std::string& getName() const { return (name_); }
+ std::string getName() const { return (name_); }
/// @brief Return list of record fields.
///
@@ -456,6 +502,8 @@ private:
OptionDataType type_;
/// Indicates whether the option is a single value or an array.
bool array_type_;
+ /// Name of the space being encapsulated by this option.
+ std::string encapsulated_space_;
/// Collection of data fields within the record.
RecordFieldsCollection record_fields_;
};
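
A short sketch of the new encapsulated-space constructors in use (not part of the patch; the code and space name match the vendor-encapsulated-options entry in std_option_defs.h, and the exact exception thrown on a failed validate() is not spelled out here):

    #include <dhcp/option_definition.h>
    #include <string>

    using namespace isc::dhcp;

    void defineEncapsulatingOption() {
        // DHCPv4 option 43 carries sub-options from a separate option space.
        // The four-argument constructor records that space and leaves the
        // array flag off, as required for options carrying sub-options.
        OptionDefinition def("vendor-encapsulated-options", 43, "empty",
                             "vendor-encapsulated-options-space");

        def.validate();   // throws if the definition is inconsistent

        std::string space = def.getEncapsulatedSpace();
        // space == "vendor-encapsulated-options-space"
    }
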
diff --git a/src/lib/dhcp/option_int_array.h b/src/lib/dhcp/option_int_array.h
index 5004152..1babee5 100644
--- a/src/lib/dhcp/option_int_array.h
+++ b/src/lib/dhcp/option_int_array.h
@@ -25,6 +25,23 @@
namespace isc {
namespace dhcp {
+/// Forward declaration of OptionIntArray.
+template<typename T>
+class OptionIntArray;
+
+/// @defgroup option_int_array_defs Typedefs for OptionIntArray class.
+///
+/// @brief Classes that represent options comprising array of integers.
+///
+/// @{
+typedef OptionIntArray<uint8_t> OptionUint8Array;
+typedef boost::shared_ptr<OptionUint8Array> OptionUint8ArrayPtr;
+typedef OptionIntArray<uint16_t> OptionUint16Array;
+typedef boost::shared_ptr<OptionUint16Array> OptionUint16ArrayPtr;
+typedef OptionIntArray<uint32_t> OptionUint32Array;
+typedef boost::shared_ptr<OptionUint32Array> OptionUint32ArrayPtr;
+/// @}
+
/// This template class represents DHCP (v4 or v6) option with an
/// array of integer values. The type of the elements in the array
/// can be any of the following:
@@ -107,6 +124,13 @@ public:
unpack(begin, end);
}
+ /// @brief Adds a new value to the array.
+ ///
+ /// @param value a value being added.
+ void addValue(const T value) {
+ values_.push_back(value);
+ }
+
/// Writes option in wire-format to buf, returns pointer to first unused
/// byte after stored option.
///
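
The new addValue() helper together with the typedefs above makes it easy to build array options incrementally, for example a DHCPv6 ORO. A brief sketch (not part of the patch; the two-argument OptionIntArray constructor taking a universe and an option code is assumed here):

    #include <dhcp/dhcp6.h>
    #include <dhcp/option_int_array.h>
    #include <util/buffer.h>

    using namespace isc::dhcp;

    void buildOro() {
        // Start with an empty uint16_t array option for the DHCPv6 ORO.
        OptionUint16ArrayPtr oro(new OptionUint16Array(Option::V6, D6O_ORO));
        oro->addValue(D6O_NAME_SERVERS);    // request dns-servers
        oro->addValue(D6O_DOMAIN_SEARCH);   // request domain-search

        isc::util::OutputBuffer buf(16);
        oro->pack(buf);                     // values go out in network byte order
    }
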
diff --git a/src/lib/dhcp/option_space.cc b/src/lib/dhcp/option_space.cc
new file mode 100644
index 0000000..f9f5bee
--- /dev/null
+++ b/src/lib/dhcp/option_space.cc
@@ -0,0 +1,71 @@
+// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dhcp/option_space.h>
+#include <boost/algorithm/string/classification.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+
+namespace isc {
+namespace dhcp {
+
+OptionSpace::OptionSpace(const std::string& name, const bool vendor_space)
+ : name_(name), vendor_space_(vendor_space) {
+ // Check that provided option space name is valid.
+ if (!validateName(name_)) {
+ isc_throw(InvalidOptionSpace, "Invalid option space name "
+ << name_);
+ }
+}
+
+bool
+OptionSpace::validateName(const std::string& name) {
+
+ using namespace boost::algorithm;
+
+ // Allowed characters are: lower or upper case letters, digits,
+ // underscores and hyphens. Empty option space names are not allowed.
+ if (all(name, boost::is_from_range('a', 'z') ||
+ boost::is_from_range('A', 'Z') ||
+ boost::is_digit() ||
+ boost::is_any_of(std::string("-_"))) &&
+ !name.empty() &&
+ // Hyphens and underscores are not allowed at the beginning
+ // and at the end of the option space name.
+ !all(find_head(name, 1), boost::is_any_of(std::string("-_"))) &&
+ !all(find_tail(name, 1), boost::is_any_of(std::string("-_")))) {
+ return (true);
+
+ }
+ return (false);
+}
+
+OptionSpace6::OptionSpace6(const std::string& name)
+ : OptionSpace(name),
+ enterprise_number_(0) {
+}
+
+OptionSpace6::OptionSpace6(const std::string& name,
+ const uint32_t enterprise_number)
+ : OptionSpace(name, true),
+ enterprise_number_(enterprise_number) {
+}
+
+void
+OptionSpace6::setVendorSpace(const uint32_t enterprise_number) {
+ enterprise_number_ = enterprise_number;
+ OptionSpace::setVendorSpace();
+}
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
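
A few checks (not part of the patch) that exercise the validation rules implemented above:

    #include <dhcp/option_space.h>
    #include <cassert>

    using namespace isc::dhcp;

    void checkNames() {
        assert(OptionSpace::validateName("dhcp4"));
        assert(OptionSpace::validateName("isc_Vendor-Opts"));

        assert(!OptionSpace::validateName(""));             // empty name
        assert(!OptionSpace::validateName("-leading"));     // hyphen at the start
        assert(!OptionSpace::validateName("trailing_"));    // underscore at the end
        assert(!OptionSpace::validateName("white space"));  // space not allowed
    }
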
diff --git a/src/lib/dhcp/option_space.h b/src/lib/dhcp/option_space.h
new file mode 100644
index 0000000..9eebd76
--- /dev/null
+++ b/src/lib/dhcp/option_space.h
@@ -0,0 +1,189 @@
+// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION_SPACE_H
+#define OPTION_SPACE_H
+
+#include <exceptions/exceptions.h>
+#include <boost/shared_ptr.hpp>
+#include <map>
+#include <stdint.h>
+#include <string>
+
+namespace isc {
+namespace dhcp {
+
+/// @brief Exception to be thrown when an invalid option space
+/// name is specified.
+class InvalidOptionSpace : public Exception {
+public:
+ InvalidOptionSpace(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) { };
+};
+
+/// OptionSpace forward declaration.
+class OptionSpace;
+/// A pointer to OptionSpace object.
+typedef boost::shared_ptr<OptionSpace> OptionSpacePtr;
+/// A collection of option spaces.
+typedef std::map<std::string, OptionSpacePtr> OptionSpaceCollection;
+
+/// @brief DHCP option space.
+///
+/// This class represents a single option space. Option spaces are used
+/// to group DHCP options having unique option codes. A special type
+/// of option space is the so-called "vendor specific option space",
+/// which groups sub-options sent within Vendor Encapsulated Options.
+/// For DHCPv4 this is the option with code 43. Option spaces are
+/// assigned to option instances represented by isc::dhcp::Option and
+/// other classes derived from it. Each particular option may belong to
+/// multiple option spaces.
+/// This class may be used to represent any DHCPv4 option space. If the
+/// option space is to group DHCPv4 Vendor Encapsulated Options then the
+/// "vendor space" flag must be set using \ref OptionSpace::setVendorSpace
+/// or the argument passed to the constructor. In theory, this class can
+/// also be used to represent a non-vendor specific DHCPv6 option space,
+/// but this is discouraged. For DHCPv6 option spaces the OptionSpace6
+/// class should be used instead.
+///
+/// @note this class is intended to be used to represent DHCPv4 option
+/// spaces only. However, it hasn't been called OptionSpace4 (that would
+/// suggest that it is specific to DHCPv4) because it can also be
+/// used to represent some DHCPv6 option spaces and is a base class
+/// for \ref OptionSpace6. Thus, if one declared a container as follows:
+/// @code
+/// std::vector<OptionSpace4> container;
+/// @endcode
+/// it would suggest that the container holds only DHCPv4 option spaces,
+/// while it could hold both DHCPv4 and DHCPv6 option spaces since an
+/// OptionSpace6 object could be upcast to OptionSpace4. This confusion
+/// does not arise when OptionSpace is used as the name of the base class.
+class OptionSpace {
+public:
+
+ /// @brief Constructor.
+ ///
+ /// @param name option space name.
+ /// @param vendor_space boolean value that indicates that the object
+ /// describes the vendor specific option space.
+ ///
+ /// @throw isc::dhcp::InvalidOptionSpace if given option space name
+ /// contains invalid characters or is empty. This constructor uses
+ /// \ref validateName function to check that the specified name is
+ /// correct.
+ OptionSpace(const std::string& name, const bool vendor_space = false);
+
+ /// @brief Return option space name.
+ ///
+ /// @return option space name.
+ const std::string& getName() const { return (name_); }
+
+ /// @brief Mark option space as non-vendor space.
+ void clearVendorSpace() {
+ vendor_space_ = false;
+ }
+
+ /// @brief Check if option space is vendor specific.
+ ///
+ /// @return boolean value that indicates if the object describes
+ /// the vendor specific option space.
+ bool isVendorSpace() const { return (vendor_space_); }
+
+ /// @brief Mark option space as vendor specific.
+ void setVendorSpace() {
+ vendor_space_ = true;
+ }
+
+ /// @brief Checks that the provided option space name is valid.
+ ///
+ /// It is expected that an option space name consists of upper or
+ /// lower case letters or digits. It may also contain underscores
+ /// or hyphens. Other characters are prohibited. Empty option space
+ /// names are invalid.
+ ///
+ /// @param name option space name to be validated.
+ ///
+ /// @return true if the option space name is valid, false otherwise.
+ static bool validateName(const std::string& name);
+
+private:
+ std::string name_; ///< Holds option space name.
+
+ bool vendor_space_; ///< Is this the vendor space?
+
+};
+
+/// @brief DHCPv6 option space with enterprise number assigned.
+///
+/// This class extends the base class with the support for enterprise numbers.
+/// The enterprise numbers are assigned by IANA to various organizations
+/// and they are carried as uint32_t integers in DHCPv6 Vendor Specific
+/// Information Options (VSIO). For more information refer to RFC3315.
+/// All option spaces that group VSIO options must have the enterprise number
+/// set. It can be set using a constructor or the \ref setVendorSpace function.
+/// The extra functionality of this class (enterprise numbers) makes it
+/// possible to represent DHCPv6 vendor-specific option spaces, but the class
+/// is also intended to be used for all other DHCPv6 option spaces. That way
+/// all DHCPv6 option spaces can be stored in a container holding OptionSpace6
+/// objects. Also, it is easy to mark a vendor-specific option space as
+/// non-vendor-specific (and the other way around) without needing to cast
+/// between the OptionSpace and OptionSpace6 types.
+class OptionSpace6 : public OptionSpace {
+public:
+
+ /// @brief Constructor for non-vendor-specific options.
+ ///
+ /// This constructor marks option space as non-vendor specific.
+ ///
+ /// @param name option space name.
+ ///
+ /// @throw isc::dhcp::InvalidOptionSpace if given option space name
+ /// contains invalid characters or is empty. This constructor uses
+ /// \ref OptionSpace::validateName function to check that the specified
+ /// name is correct.
+ OptionSpace6(const std::string& name);
+
+ /// @brief Constructor for vendor-specific options.
+ ///
+ /// This constructor marks option space as vendor specific and sets
+ /// enterprise number to a given value.
+ ///
+ /// @param name option space name.
+ /// @param enterprise_number enterprise number.
+ ///
+ /// @throw isc::dhcp::InvalidOptionSpace if given option space name
+ /// contains invalid characters or is empty. This constructor uses
+ /// \ref OptionSpace::validateName function to check that the specified
+ /// name is correct.
+ OptionSpace6(const std::string& name, const uint32_t enterprise_number);
+
+ /// @brief Return enterprise number for the option space.
+ ///
+ /// @return enterprise number.
+ uint32_t getEnterpriseNumber() const { return (enterprise_number_); }
+
+ /// @brief Mark option space as vendor specific.
+ ///
+ /// @param enterprise_number enterprise number.
+ void setVendorSpace(const uint32_t enterprise_number);
+
+private:
+
+ uint32_t enterprise_number_; ///< IANA assigned enterprise number.
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif // OPTION_SPACE_H
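
A small sketch of the two OptionSpace6 flavours declared above (not part of the patch; the enterprise numbers are arbitrary example values):

    #include <dhcp/option_space.h>
    #include <stdint.h>

    using namespace isc::dhcp;

    void optionSpaces() {
        // A plain DHCPv6 option space: not vendor specific.
        OptionSpace6 generic("dhcp6");

        // A vendor-specific space carried within VSIO options.
        OptionSpace6 vendor("vendor-space", 4491);
        bool is_vendor = vendor.isVendorSpace();       // true
        uint32_t en = vendor.getEnterpriseNumber();    // 4491

        // An existing space can be marked vendor specific later on.
        generic.setVendorSpace(2495);

        (void)is_vendor; (void)en;   // silence unused-variable warnings
    }
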
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index d3b22de..0592807 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -219,7 +219,7 @@ void Pkt4::check() {
uint8_t msg_type = getType();
if (msg_type > DHCPLEASEACTIVE) {
isc_throw(BadValue, "Invalid DHCP message type received: "
- << msg_type);
+ << static_cast<int>(msg_type));
}
}
@@ -230,10 +230,10 @@ uint8_t Pkt4::getType() const {
}
// Check if Message Type is specified as OptionInt<uint8_t>
- boost::shared_ptr<OptionInt<uint8_t> > typeOpt =
+ boost::shared_ptr<OptionInt<uint8_t> > type_opt =
boost::dynamic_pointer_cast<OptionInt<uint8_t> >(generic);
- if (typeOpt) {
- return (typeOpt->getValue());
+ if (type_opt) {
+ return (type_opt->getValue());
}
// Try to use it as generic option
@@ -253,7 +253,6 @@ void Pkt4::setType(uint8_t dhcp_type) {
}
}
-
void Pkt4::repack() {
bufferOut_.writeData(&data_[0], data_.size());
}
@@ -263,7 +262,7 @@ Pkt4::toText() {
stringstream tmp;
tmp << "localAddr=" << local_addr_.toText() << ":" << local_port_
<< " remoteAddr=" << remote_addr_.toText()
- << ":" << remote_port_ << ", msgtype=" << getType()
+ << ":" << remote_port_ << ", msgtype=" << static_cast<int>(getType())
<< ", transid=0x" << hex << transid_ << dec << endl;
for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
index 2c97b07..c3a98bf 100644
--- a/src/lib/dhcp/pkt6.cc
+++ b/src/lib/dhcp/pkt6.cc
@@ -90,7 +90,7 @@ Pkt6::packUDP() {
bufferOut_.writeUint8( (transid_) & 0xff );
// the rest are options
- LibDHCP::packOptions6(bufferOut_, options_);
+ LibDHCP::packOptions(bufferOut_, options_);
}
catch (const Exception& e) {
/// @todo: throw exception here once we turn this function to void.
@@ -155,8 +155,8 @@ Pkt6::toText() {
tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
<< " remoteAddr=[" << remote_addr_.toText()
<< "]:" << remote_port_ << endl;
- tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
- << dec << endl;
+ tmp << "msgtype=" << static_cast<int>(msg_type_) << ", transid=0x" <<
+ hex << transid_ << dec << endl;
for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
opt != options_.end();
++opt) {
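
The static_cast<int> additions in Pkt4::check(), Pkt4::toText() and Pkt6::toText() are needed because streaming a uint8_t into an ostream prints it as a character rather than a number. A two-line illustration (not part of the patch):

    #include <iostream>
    #include <stdint.h>

    int main() {
        uint8_t msg_type = 1;                                   // e.g. a DHCP message type
        std::cout << msg_type << std::endl;                     // prints a control character
        std::cout << static_cast<int>(msg_type) << std::endl;   // prints "1"
        return (0);
    }
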
diff --git a/src/lib/dhcp/std_option_defs.h b/src/lib/dhcp/std_option_defs.h
index 144df8b..839a5d9 100644
--- a/src/lib/dhcp/std_option_defs.h
+++ b/src/lib/dhcp/std_option_defs.h
@@ -50,6 +50,8 @@ struct OptionDefParams {
bool array; // is array
const OptionDataType* records; // record fields
size_t records_size; // number of fields in a record
+ const char* encapsulates; // option space encapsulated by
+ // the particular option.
};
// fqdn option record fields.
@@ -64,128 +66,128 @@ RECORD_DECL(FQDN_RECORDS, OPT_UINT8_TYPE, OPT_UINT8_TYPE, OPT_STRING_TYPE);
/// @brief Definitions of standard DHCPv4 options.
const OptionDefParams OPTION_DEF_PARAMS4[] = {
- { "subnet-mask", DHO_SUBNET_MASK, OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
- { "time-offset", DHO_TIME_OFFSET, OPT_UINT32_TYPE, false, NO_RECORD_DEF },
- { "routers", DHO_ROUTERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
- { "time-servers", DHO_TIME_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ { "subnet-mask", DHO_SUBNET_MASK, OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
+ { "time-offset", DHO_TIME_OFFSET, OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
+ { "routers", DHO_ROUTERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
+ { "time-servers", DHO_TIME_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "name-servers", DHO_NAME_SERVERS, OPT_IPV4_ADDRESS_TYPE,
- false, NO_RECORD_DEF },
+ false, NO_RECORD_DEF, "" },
{ "domain-name-servers", DHO_DOMAIN_NAME_SERVERS,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
- { "log-servers", DHO_LOG_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
+ { "log-servers", DHO_LOG_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "cookie-servers", DHO_COOKIE_SERVERS, OPT_IPV4_ADDRESS_TYPE,
- true, NO_RECORD_DEF },
- { "lpr-servers", DHO_LPR_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
- { "impress-servers", DHO_IMPRESS_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ true, NO_RECORD_DEF, "" },
+ { "lpr-servers", DHO_LPR_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
+ { "impress-servers", DHO_IMPRESS_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "resource-location-servers", DHO_RESOURCE_LOCATION_SERVERS,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
- { "host-name", DHO_HOST_NAME, OPT_STRING_TYPE, false, NO_RECORD_DEF },
- { "boot-size", DHO_BOOT_SIZE, OPT_UINT16_TYPE, false, NO_RECORD_DEF },
- { "merit-dump", DHO_MERIT_DUMP, OPT_STRING_TYPE, false, NO_RECORD_DEF },
- { "domain-name", DHO_DOMAIN_NAME, OPT_FQDN_TYPE, false, NO_RECORD_DEF },
- { "swap-server", DHO_SWAP_SERVER, OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
- { "root-path", DHO_ROOT_PATH, OPT_STRING_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
+ { "host-name", DHO_HOST_NAME, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
+ { "boot-size", DHO_BOOT_SIZE, OPT_UINT16_TYPE, false, NO_RECORD_DEF, "" },
+ { "merit-dump", DHO_MERIT_DUMP, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
+ { "domain-name", DHO_DOMAIN_NAME, OPT_FQDN_TYPE, false, NO_RECORD_DEF, "" },
+ { "swap-server", DHO_SWAP_SERVER, OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
+ { "root-path", DHO_ROOT_PATH, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
{ "extensions-path", DHO_EXTENSIONS_PATH, OPT_STRING_TYPE,
- false, NO_RECORD_DEF },
- { "ip-forwarding", DHO_IP_FORWARDING, OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
+ false, NO_RECORD_DEF, "" },
+ { "ip-forwarding", DHO_IP_FORWARDING, OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
{ "non-local-source-routing", DHO_NON_LOCAL_SOURCE_ROUTING,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
- { "policy-filter", DHO_POLICY_FILTER, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
+ { "policy-filter", DHO_POLICY_FILTER, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "max-dgram-reassembly", DHO_MAX_DGRAM_REASSEMBLY,
- OPT_UINT16_TYPE, false, NO_RECORD_DEF },
- { "default-ip-ttl", DHO_DEFAULT_IP_TTL, OPT_UINT8_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT16_TYPE, false, NO_RECORD_DEF, "" },
+ { "default-ip-ttl", DHO_DEFAULT_IP_TTL, OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
{ "path-mtu-aging-timeout", DHO_PATH_MTU_AGING_TIMEOUT,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "path-mtu-plateau-table", DHO_PATH_MTU_PLATEAU_TABLE,
- OPT_UINT16_TYPE, true, NO_RECORD_DEF },
- { "interface-mtu", DHO_INTERFACE_MTU, OPT_UINT16_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT16_TYPE, true, NO_RECORD_DEF, "" },
+ { "interface-mtu", DHO_INTERFACE_MTU, OPT_UINT16_TYPE, false, NO_RECORD_DEF, "" },
{ "all-subnets-local", DHO_ALL_SUBNETS_LOCAL,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
{ "broadcast-address", DHO_BROADCAST_ADDRESS,
- OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
{ "perform-mask-discovery", DHO_PERFORM_MASK_DISCOVERY,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
- { "mask-supplier", DHO_MASK_SUPPLIER, OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
+ { "mask-supplier", DHO_MASK_SUPPLIER, OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
{ "router-discovery", DHO_ROUTER_DISCOVERY,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
{ "router-solicitation-address", DHO_ROUTER_SOLICITATION_ADDRESS,
- OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
{ "static-routes", DHO_STATIC_ROUTES,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "trailer-encapsulation", DHO_TRAILER_ENCAPSULATION,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
{ "arp-cache-timeout", DHO_ARP_CACHE_TIMEOUT,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "ieee802-3-encapsulation", DHO_IEEE802_3_ENCAPSULATION,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
- { "default-tcp-ttl", DHO_DEFAULT_TCP_TTL, OPT_UINT8_TYPE, false, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
+ { "default-tcp-ttl", DHO_DEFAULT_TCP_TTL, OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
{ "tcp-keepalive-internal", DHO_TCP_KEEPALIVE_INTERVAL,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "tcp-keepalive-garbage", DHO_TCP_KEEPALIVE_GARBAGE,
- OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF },
- { "nis-domain", DHO_NIS_DOMAIN, OPT_STRING_TYPE, false, NO_RECORD_DEF },
- { "nis-servers", DHO_NIS_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
- { "ntp-servers", DHO_NTP_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_BOOLEAN_TYPE, false, NO_RECORD_DEF, "" },
+ { "nis-domain", DHO_NIS_DOMAIN, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
+ { "nis-servers", DHO_NIS_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
+ { "ntp-servers", DHO_NTP_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "vendor-encapsulated-options", DHO_VENDOR_ENCAPSULATED_OPTIONS,
- OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ OPT_EMPTY_TYPE, false, NO_RECORD_DEF, "vendor-encapsulated-options-space" },
{ "netbios-name-servers", DHO_NETBIOS_NAME_SERVERS,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "netbios-dd-server", DHO_NETBIOS_DD_SERVER,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "netbios-node-type", DHO_NETBIOS_NODE_TYPE,
- OPT_UINT8_TYPE, false, NO_RECORD_DEF },
- { "netbios-scope", DHO_NETBIOS_SCOPE, OPT_STRING_TYPE, false, NO_RECORD_DEF },
- { "font-servers", DHO_FONT_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
+ { "netbios-scope", DHO_NETBIOS_SCOPE, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
+ { "font-servers", DHO_FONT_SERVERS, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "x-display-manager", DHO_X_DISPLAY_MANAGER,
- OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "dhcp-requested-address", DHO_DHCP_REQUESTED_ADDRESS,
- OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
- { "dhcp-lease-time", DHO_DHCP_LEASE_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
+ { "dhcp-lease-time", DHO_DHCP_LEASE_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-option-overload", DHO_DHCP_OPTION_OVERLOAD,
- OPT_UINT8_TYPE, false, NO_RECORD_DEF },
- { "dhcp-message-type", DHO_DHCP_MESSAGE_TYPE, OPT_UINT8_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
+ { "dhcp-message-type", DHO_DHCP_MESSAGE_TYPE, OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-server-identifier", DHO_DHCP_SERVER_IDENTIFIER,
- OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-parameter-request-list", DHO_DHCP_PARAMETER_REQUEST_LIST,
- OPT_UINT8_TYPE, true, NO_RECORD_DEF },
- { "dhcp-message", DHO_DHCP_MESSAGE, OPT_STRING_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT8_TYPE, true, NO_RECORD_DEF, "" },
+ { "dhcp-message", DHO_DHCP_MESSAGE, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-max-message-size", DHO_DHCP_MAX_MESSAGE_SIZE,
- OPT_UINT16_TYPE, false, NO_RECORD_DEF },
- { "dhcp-renewal-time", DHO_DHCP_RENEWAL_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT16_TYPE, false, NO_RECORD_DEF, "" },
+ { "dhcp-renewal-time", DHO_DHCP_RENEWAL_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-rebinding-time", DHO_DHCP_REBINDING_TIME,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "vendor-class-identifier", DHO_VENDOR_CLASS_IDENTIFIER,
- OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
{ "dhcp-client-identifier", DHO_DHCP_CLIENT_IDENTIFIER,
- OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "nwip-domain-name", DHO_NWIP_DOMAIN_NAME, OPT_STRING_TYPE, false, NO_RECORD_DEF },
- { "nwip-suboptions", DHO_NWIP_SUBOPTIONS, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "user-class", DHO_USER_CLASS, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "fqdn", DHO_FQDN, OPT_RECORD_TYPE, false, RECORD_DEF(FQDN_RECORDS) },
+ OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "nwip-domain-name", DHO_NWIP_DOMAIN_NAME, OPT_STRING_TYPE, false, NO_RECORD_DEF, "" },
+ { "nwip-suboptions", DHO_NWIP_SUBOPTIONS, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "user-class", DHO_USER_CLASS, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "fqdn", DHO_FQDN, OPT_RECORD_TYPE, false, RECORD_DEF(FQDN_RECORDS), "" },
{ "dhcp-agent-options", DHO_DHCP_AGENT_OPTIONS,
- OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ OPT_EMPTY_TYPE, false, NO_RECORD_DEF, "dhcp-agent-options-space" },
// Unfortunately the AUTHENTICATE option contains a 64-bit
// data field called 'replay-detection' that can't be added
// as a record field to a custom option. Also, there is no
// dedicated option class to handle it so we simply return
// binary option type for now.
// @todo implement a class to handle AUTH option.
- { "authenticate", DHO_AUTHENTICATE, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ { "authenticate", DHO_AUTHENTICATE, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
{ "client-last-transaction-time", DHO_CLIENT_LAST_TRANSACTION_TIME,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
- { "associated-ip", DHO_ASSOCIATED_IP, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
+ { "associated-ip", DHO_ASSOCIATED_IP, OPT_IPV4_ADDRESS_TYPE, true, NO_RECORD_DEF, "" },
{ "subnet-selection", DHO_SUBNET_SELECTION,
- OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF },
+ OPT_IPV4_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
// The following options need a special encoding of data
// being carried by them. Therefore, there is no way they can
// be handled by OptionCustom. We may need to implement
// dedicated classes to handle them. Until that happens
// let's treat them as 'binary' options.
- { "domain-search", DHO_DOMAIN_SEARCH, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ { "domain-search", DHO_DOMAIN_SEARCH, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
{ "vivco-suboptions", DHO_VIVCO_SUBOPTIONS,
- OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
{ "vivso-suboptions", DHO_VIVSO_SUBOPTIONS, OPT_BINARY_TYPE,
- false, NO_RECORD_DEF }
+ false, NO_RECORD_DEF, "" }
// @todo add definitions for all remaining options.
};
@@ -222,8 +224,6 @@ RECORD_DECL(REMOTE_ID_RECORDS, OPT_UINT32_TYPE, OPT_BINARY_TYPE);
RECORD_DECL(STATUS_CODE_RECORDS, OPT_UINT16_TYPE, OPT_STRING_TYPE);
// vendor-class
RECORD_DECL(VENDOR_CLASS_RECORDS, OPT_UINT32_TYPE, OPT_BINARY_TYPE);
-// vendor-opts
-RECORD_DECL(VENDOR_OPTS_RECORDS, OPT_UINT32_TYPE, OPT_BINARY_TYPE);
/// Standard DHCPv6 option definitions.
///
@@ -236,84 +236,84 @@ RECORD_DECL(VENDOR_OPTS_RECORDS, OPT_UINT32_TYPE, OPT_BINARY_TYPE);
/// warning about lack of initializers for some struct members
/// causing build to fail.
const OptionDefParams OPTION_DEF_PARAMS6[] = {
- { "clientid", D6O_CLIENTID, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "serverid", D6O_SERVERID, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "ia-na", D6O_IA_NA, OPT_RECORD_TYPE, false, RECORD_DEF(IA_NA_RECORDS) },
- { "ia-ta", D6O_IA_TA, OPT_UINT32_TYPE, false, NO_RECORD_DEF },
- { "iaaddr", D6O_IAADDR, OPT_RECORD_TYPE, false, RECORD_DEF(IAADDR_RECORDS) },
- { "oro", D6O_ORO, OPT_UINT16_TYPE, true, NO_RECORD_DEF },
- { "preference", D6O_PREFERENCE, OPT_UINT8_TYPE, false, NO_RECORD_DEF },
- { "elapsed-time", D6O_ELAPSED_TIME, OPT_UINT16_TYPE, false, NO_RECORD_DEF },
- { "relay-msg", D6O_RELAY_MSG, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ { "clientid", D6O_CLIENTID, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "serverid", D6O_SERVERID, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "ia-na", D6O_IA_NA, OPT_RECORD_TYPE, false, RECORD_DEF(IA_NA_RECORDS), "" },
+ { "ia-ta", D6O_IA_TA, OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
+ { "iaaddr", D6O_IAADDR, OPT_RECORD_TYPE, false, RECORD_DEF(IAADDR_RECORDS), "" },
+ { "oro", D6O_ORO, OPT_UINT16_TYPE, true, NO_RECORD_DEF, "" },
+ { "preference", D6O_PREFERENCE, OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
+ { "elapsed-time", D6O_ELAPSED_TIME, OPT_UINT16_TYPE, false, NO_RECORD_DEF, "" },
+ { "relay-msg", D6O_RELAY_MSG, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
// Unfortunately the AUTH option contains a 64-bit data field
// called 'replay-detection' that can't be added as a record
// field to a custom option. Also, there is no dedicated
// option class to handle it so we simply return binary
// option type for now.
// @todo implement a class to handle AUTH option.
- { "auth", D6O_AUTH, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "unicast", D6O_UNICAST, OPT_IPV6_ADDRESS_TYPE, false, NO_RECORD_DEF },
+ { "auth", D6O_AUTH, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "unicast", D6O_UNICAST, OPT_IPV6_ADDRESS_TYPE, false, NO_RECORD_DEF, "" },
{ "status-code", D6O_STATUS_CODE, OPT_RECORD_TYPE, false,
- RECORD_DEF(STATUS_CODE_RECORDS) },
- { "rapid-commit", D6O_RAPID_COMMIT, OPT_EMPTY_TYPE, false, NO_RECORD_DEF },
- { "user-class", D6O_USER_CLASS, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
+ RECORD_DEF(STATUS_CODE_RECORDS), "" },
+ { "rapid-commit", D6O_RAPID_COMMIT, OPT_EMPTY_TYPE, false, NO_RECORD_DEF, "" },
+ { "user-class", D6O_USER_CLASS, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
{ "vendor-class", D6O_VENDOR_CLASS, OPT_RECORD_TYPE, false,
- RECORD_DEF(VENDOR_CLASS_RECORDS) },
- { "vendor-opts", D6O_VENDOR_OPTS, OPT_RECORD_TYPE, false,
- RECORD_DEF(VENDOR_OPTS_RECORDS) },
- { "interface-id", D6O_INTERFACE_ID, OPT_BINARY_TYPE, false, NO_RECORD_DEF },
- { "reconf-msg", D6O_RECONF_MSG, OPT_UINT8_TYPE, false, NO_RECORD_DEF },
+ RECORD_DEF(VENDOR_CLASS_RECORDS), "" },
+ { "vendor-opts", D6O_VENDOR_OPTS, OPT_UINT32_TYPE, false,
+ NO_RECORD_DEF, "vendor-opts-space" },
+ { "interface-id", D6O_INTERFACE_ID, OPT_BINARY_TYPE, false, NO_RECORD_DEF, "" },
+ { "reconf-msg", D6O_RECONF_MSG, OPT_UINT8_TYPE, false, NO_RECORD_DEF, "" },
{ "reconf-accept", D6O_RECONF_ACCEPT, OPT_EMPTY_TYPE, false,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "sip-server-dns", D6O_SIP_SERVERS_DNS, OPT_FQDN_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "sip-server-addr", D6O_SIP_SERVERS_ADDR, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "dns-servers", D6O_NAME_SERVERS, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
- { "domain-search", D6O_DOMAIN_SEARCH, OPT_FQDN_TYPE, true, NO_RECORD_DEF },
- { "ia-pd", D6O_IA_PD, OPT_RECORD_TYPE, false, RECORD_DEF(IA_PD_RECORDS) },
+ NO_RECORD_DEF, "" },
+ { "domain-search", D6O_DOMAIN_SEARCH, OPT_FQDN_TYPE, true, NO_RECORD_DEF, "" },
+ { "ia-pd", D6O_IA_PD, OPT_RECORD_TYPE, false, RECORD_DEF(IA_PD_RECORDS), "" },
{ "iaprefix", D6O_IAPREFIX, OPT_RECORD_TYPE, false,
- RECORD_DEF(IA_PREFIX_RECORDS) },
+ RECORD_DEF(IA_PREFIX_RECORDS), "" },
{ "nis-servers", D6O_NIS_SERVERS, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "nisp-servers", D6O_NISP_SERVERS, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "nis-domain-name", D6O_NIS_DOMAIN_NAME, OPT_FQDN_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "nisp-domain-name", D6O_NISP_DOMAIN_NAME, OPT_FQDN_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "sntp-servers", D6O_SNTP_SERVERS, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "information-refresh-time", D6O_INFORMATION_REFRESH_TIME,
- OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "bcmcs-server-dns", D6O_BCMCS_SERVER_D, OPT_FQDN_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "bcmcs-server-addr", D6O_BCMCS_SERVER_A, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "geoconf-civic", D6O_GEOCONF_CIVIC, OPT_RECORD_TYPE, false,
- RECORD_DEF(GEOCONF_CIVIC_RECORDS) },
+ RECORD_DEF(GEOCONF_CIVIC_RECORDS), "" },
{ "remote-id", D6O_REMOTE_ID, OPT_RECORD_TYPE, false,
- RECORD_DEF(REMOTE_ID_RECORDS) },
+ RECORD_DEF(REMOTE_ID_RECORDS), "" },
{ "subscriber-id", D6O_SUBSCRIBER_ID, OPT_BINARY_TYPE, false,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "client-fqdn", D6O_CLIENT_FQDN, OPT_RECORD_TYPE, false,
- RECORD_DEF(CLIENT_FQDN_RECORDS) },
+ RECORD_DEF(CLIENT_FQDN_RECORDS), "" },
{ "pana-agent", D6O_PANA_AGENT, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "new-posix-timezone", D6O_NEW_POSIX_TIMEZONE, OPT_STRING_TYPE, false,
- NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
{ "new-tzdb-timezone", D6O_NEW_TZDB_TIMEZONE, OPT_STRING_TYPE, false,
- NO_RECORD_DEF },
- { "ero", D6O_ERO, OPT_UINT16_TYPE, true, NO_RECORD_DEF },
+ NO_RECORD_DEF, "" },
+ { "ero", D6O_ERO, OPT_UINT16_TYPE, true, NO_RECORD_DEF, "" },
{ "lq-query", D6O_LQ_QUERY, OPT_RECORD_TYPE, false,
- RECORD_DEF(LQ_QUERY_RECORDS) },
- { "client-data", D6O_CLIENT_DATA, OPT_EMPTY_TYPE, false, NO_RECORD_DEF },
- { "clt-time", D6O_CLT_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF },
+ RECORD_DEF(LQ_QUERY_RECORDS), "" },
+ { "client-data", D6O_CLIENT_DATA, OPT_EMPTY_TYPE, false, NO_RECORD_DEF, "" },
+ { "clt-time", D6O_CLT_TIME, OPT_UINT32_TYPE, false, NO_RECORD_DEF, "" },
{ "lq-relay-data", D6O_LQ_RELAY_DATA, OPT_RECORD_TYPE, false,
- RECORD_DEF(LQ_RELAY_DATA_RECORDS) },
+ RECORD_DEF(LQ_RELAY_DATA_RECORDS), "" },
{ "lq-client-link", D6O_LQ_CLIENT_LINK, OPT_IPV6_ADDRESS_TYPE, true,
- NO_RECORD_DEF }
+ NO_RECORD_DEF, "" }
// @todo There is still a bunch of options for which we have to provide
// definitions but we don't do it because they are not really
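The new trailing string in each table entry above names the option space encapsulated by that option; the empty string means the option encapsulates nothing. A minimal sketch of how one such row maps onto an OptionDefinition, assuming the four-argument constructor and the OptionDefinitionPtr typedef from option_definition.h (the header paths here are illustrative, not taken from this diff):

    // Sketch only: build a definition equivalent to the "vendor-opts" row,
    // which carries a uint32 enterprise number and encapsulates the
    // "vendor-opts-space" option space.
    #include <dhcp/dhcp6.h>
    #include <dhcp/option_definition.h>

    using namespace isc::dhcp;

    OptionDefinitionPtr
    makeVendorOptsDefinition() {
        OptionDefinitionPtr def(new OptionDefinition("vendor-opts",
                                                     D6O_VENDOR_OPTS,
                                                     "uint32",
                                                     "vendor-opts-space"));
        // validate() throws MalformedOptionDefinition if the encapsulated
        // space name does not pass OptionSpace::validateName().
        def->validate();
        return (def);
    }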
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 4833fb5..c868553 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -40,6 +40,7 @@ libdhcp___unittests_SOURCES += option_data_types_unittest.cc
libdhcp___unittests_SOURCES += option_definition_unittest.cc
libdhcp___unittests_SOURCES += option_custom_unittest.cc
libdhcp___unittests_SOURCES += option_unittest.cc
+libdhcp___unittests_SOURCES += option_space_unittest.cc
libdhcp___unittests_SOURCES += pkt4_unittest.cc
libdhcp___unittests_SOURCES += pkt6_unittest.cc
libdhcp___unittests_SOURCES += duid_unittest.cc
diff --git a/src/lib/dhcp/tests/libdhcp++_unittest.cc b/src/lib/dhcp/tests/libdhcp++_unittest.cc
index a59da12..1a87b13 100644
--- a/src/lib/dhcp/tests/libdhcp++_unittest.cc
+++ b/src/lib/dhcp/tests/libdhcp++_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2011-2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -68,12 +68,16 @@ public:
/// used to create option instance.
/// @param expected_type type of the option created by the
/// factory function returned by the option definition.
+ /// @param encapsulates name of the option space being encapsulated
+ /// by the option.
static void testStdOptionDefs4(const uint16_t code,
const OptionBufferConstIter begin,
const OptionBufferConstIter end,
- const std::type_info& expected_type) {
+ const std::type_info& expected_type,
+ const std::string& encapsulates = "") {
// Use V4 universe.
- testStdOptionDefs(Option::V4, code, begin, end, expected_type);
+ testStdOptionDefs(Option::V4, code, begin, end, expected_type,
+ encapsulates);
}
/// @brief Test DHCPv6 option definition.
@@ -88,12 +92,16 @@ public:
/// used to create option instance.
/// @param expected_type type of the option created by the
/// factory function returned by the option definition.
+ /// @param encapsulates name of the option space being encapsulated
+ /// by the option.
static void testStdOptionDefs6(const uint16_t code,
const OptionBufferConstIter begin,
const OptionBufferConstIter end,
- const std::type_info& expected_type) {
+ const std::type_info& expected_type,
+ const std::string& encapsulates = "") {
// Use V6 universe.
- testStdOptionDefs(Option::V6, code, begin, end, expected_type);
+ testStdOptionDefs(Option::V6, code, begin, end, expected_type,
+ encapsulates);
}
private:
@@ -109,11 +117,14 @@ private:
/// used to create option instance.
/// @param expected_type type of the option created by the
/// factory function returned by the option definition.
+ /// @param encapsulates name of the option space being encapsulated
+ /// by the option.
static void testStdOptionDefs(const Option::Universe u,
const uint16_t code,
const OptionBufferConstIter begin,
const OptionBufferConstIter end,
- const std::type_info& expected_type) {
+ const std::type_info& expected_type,
+ const std::string& encapsulates) {
// Get all option definitions, we will use them to extract
// the definition for a particular option code.
// We don't have to initialize option definitions here because they
@@ -141,6 +152,9 @@ private:
ASSERT_NO_THROW(def->validate())
<< "Option definition for the option code " << code
<< " is invalid";
+ // Check that the valid encapsulated option space name
+ // has been specified.
+ EXPECT_EQ(encapsulates, def->getEncapsulatedSpace());
OptionPtr option;
// Create the option.
ASSERT_NO_THROW(option = def->optionFactory(u, code, begin, end))
@@ -259,7 +273,7 @@ TEST_F(LibDhcpTest, packOptions6) {
OutputBuffer assembled(512);
- EXPECT_NO_THROW(LibDHCP::packOptions6(assembled, opts));
+ EXPECT_NO_THROW(LibDHCP::packOptions(assembled, opts));
EXPECT_EQ(sizeof(v6packed), assembled.getLength());
EXPECT_EQ(0, memcmp(assembled.getData(), v6packed, sizeof(v6packed)));
}
@@ -648,7 +662,8 @@ TEST_F(LibDhcpTest, stdOptionDefs4) {
typeid(Option4AddrLst));
LibDhcpTest::testStdOptionDefs4(DHO_VENDOR_ENCAPSULATED_OPTIONS, begin, end,
- typeid(Option));
+ typeid(Option),
+ "vendor-encapsulated-options-space");
LibDhcpTest::testStdOptionDefs4(DHO_NETBIOS_NAME_SERVERS, begin, end,
typeid(Option4AddrLst));
@@ -717,7 +732,7 @@ TEST_F(LibDhcpTest, stdOptionDefs4) {
typeid(OptionCustom));
LibDhcpTest::testStdOptionDefs4(DHO_DHCP_AGENT_OPTIONS, begin, end,
- typeid(Option));
+ typeid(Option), "dhcp-agent-options-space");
LibDhcpTest::testStdOptionDefs4(DHO_AUTHENTICATE, begin, end,
typeid(Option));
@@ -816,7 +831,8 @@ TEST_F(LibDhcpTest, stdOptionDefs6) {
typeid(OptionCustom));
LibDhcpTest::testStdOptionDefs6(D6O_VENDOR_OPTS, begin, end,
- typeid(OptionCustom));
+ typeid(OptionInt<uint32_t>),
+ "vendor-opts-space");
LibDhcpTest::testStdOptionDefs6(D6O_INTERFACE_ID, begin, end,
typeid(Option));
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
index a8e60f6..0c1d9e6 100644
--- a/src/lib/dhcp/tests/option4_addrlst_unittest.cc
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -155,7 +155,7 @@ TEST_F(Option4AddrLstTest, assembly1) {
OutputBuffer buf(100);
EXPECT_NO_THROW(
- opt->pack4(buf);
+ opt->pack(buf);
);
ASSERT_EQ(6, opt->len());
@@ -198,7 +198,7 @@ TEST_F(Option4AddrLstTest, assembly4) {
OutputBuffer buf(100);
EXPECT_NO_THROW(
- opt->pack4(buf);
+ opt->pack(buf);
);
ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
diff --git a/src/lib/dhcp/tests/option_definition_unittest.cc b/src/lib/dhcp/tests/option_definition_unittest.cc
index 310c7bf..174bafb 100644
--- a/src/lib/dhcp/tests/option_definition_unittest.cc
+++ b/src/lib/dhcp/tests/option_definition_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2012-2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
@@ -53,38 +53,62 @@ public:
TEST_F(OptionDefinitionTest, constructor) {
// Specify the option data type as string. This should get converted
// to enum value returned by getType().
- OptionDefinition opt_def1("OPTION_CLIENTID", 1, "string");
+ OptionDefinition opt_def1("OPTION_CLIENTID", D6O_CLIENTID, "string");
EXPECT_EQ("OPTION_CLIENTID", opt_def1.getName());
EXPECT_EQ(1, opt_def1.getCode());
EXPECT_EQ(OPT_STRING_TYPE, opt_def1.getType());
EXPECT_FALSE(opt_def1.getArrayType());
+ EXPECT_TRUE(opt_def1.getEncapsulatedSpace().empty());
EXPECT_NO_THROW(opt_def1.validate());
// Specify the option data type as an enum value.
- OptionDefinition opt_def2("OPTION_RAPID_COMMIT", 14,
+ OptionDefinition opt_def2("OPTION_RAPID_COMMIT", D6O_RAPID_COMMIT,
OPT_EMPTY_TYPE);
EXPECT_EQ("OPTION_RAPID_COMMIT", opt_def2.getName());
EXPECT_EQ(14, opt_def2.getCode());
EXPECT_EQ(OPT_EMPTY_TYPE, opt_def2.getType());
EXPECT_FALSE(opt_def2.getArrayType());
- EXPECT_NO_THROW(opt_def1.validate());
+ EXPECT_TRUE(opt_def2.getEncapsulatedSpace().empty());
+ EXPECT_NO_THROW(opt_def2.validate());
+
+ // Specify encapsulated option space name and option data type
+ // as enum value.
+ OptionDefinition opt_def3("OPTION_VENDOR_OPTS", D6O_VENDOR_OPTS,
+ OPT_UINT32_TYPE, "isc");
+ EXPECT_EQ("OPTION_VENDOR_OPTS", opt_def3.getName());
+ EXPECT_EQ(D6O_VENDOR_OPTS, opt_def3.getCode());
+ EXPECT_EQ(OPT_UINT32_TYPE, opt_def3.getType());
+ EXPECT_FALSE(opt_def3.getArrayType());
+ EXPECT_EQ("isc", opt_def3.getEncapsulatedSpace());
+ EXPECT_NO_THROW(opt_def3.validate());
+
+ // Specify encapsulated option space name and option data type
+ // as string value.
+ OptionDefinition opt_def4("OPTION_VENDOR_OPTS", D6O_VENDOR_OPTS,
+ "uint32", "isc");
+ EXPECT_EQ("OPTION_VENDOR_OPTS", opt_def4.getName());
+ EXPECT_EQ(D6O_VENDOR_OPTS, opt_def4.getCode());
+ EXPECT_EQ(OPT_UINT32_TYPE, opt_def4.getType());
+ EXPECT_FALSE(opt_def4.getArrayType());
+ EXPECT_EQ("isc", opt_def4.getEncapsulatedSpace());
+ EXPECT_NO_THROW(opt_def4.validate());
// Check if it is possible to set that option is an array.
- OptionDefinition opt_def3("OPTION_NIS_SERVERS", 27,
+ OptionDefinition opt_def5("OPTION_NIS_SERVERS", 27,
OPT_IPV6_ADDRESS_TYPE,
true);
- EXPECT_EQ("OPTION_NIS_SERVERS", opt_def3.getName());
- EXPECT_EQ(27, opt_def3.getCode());
- EXPECT_EQ(OPT_IPV6_ADDRESS_TYPE, opt_def3.getType());
- EXPECT_TRUE(opt_def3.getArrayType());
- EXPECT_NO_THROW(opt_def3.validate());
+ EXPECT_EQ("OPTION_NIS_SERVERS", opt_def5.getName());
+ EXPECT_EQ(27, opt_def5.getCode());
+ EXPECT_EQ(OPT_IPV6_ADDRESS_TYPE, opt_def5.getType());
+ EXPECT_TRUE(opt_def5.getArrayType());
+ EXPECT_NO_THROW(opt_def5.validate());
// The created object is invalid if invalid data type is specified but
// constructor shouldn't throw exception. The object is validated after
// it has been created.
EXPECT_NO_THROW(
- OptionDefinition opt_def4("OPTION_SERVERID",
+ OptionDefinition opt_def6("OPTION_SERVERID",
OPT_UNKNOWN_TYPE + 10,
OPT_STRING_TYPE);
);
@@ -213,6 +237,11 @@ TEST_F(OptionDefinitionTest, validate) {
"record");
opt_def16.addRecordField("uint8");
opt_def16.addRecordField("string");
+
+ // Check invalid encapsulated option space name.
+ OptionDefinition opt_def17("OPTION_VENDOR_OPTS", D6O_VENDOR_OPTS,
+ "uint32", "invalid%space%name");
+ EXPECT_THROW(opt_def17.validate(), MalformedOptionDefinition);
}
diff --git a/src/lib/dhcp/tests/option_int_array_unittest.cc b/src/lib/dhcp/tests/option_int_array_unittest.cc
index 1aeb584..cd868d8 100644
--- a/src/lib/dhcp/tests/option_int_array_unittest.cc
+++ b/src/lib/dhcp/tests/option_int_array_unittest.cc
@@ -294,6 +294,52 @@ public:
EXPECT_TRUE(std::equal(buf_.begin(), buf_.begin() + opt_len, out_data.begin()));;
}
+ /// @brief Test ability to set all values.
+ ///
+ /// @tparam T numeric type to perform the test for.
+ template<typename T>
+ void setValuesTest() {
+ const uint16_t opt_code = 100;
+ // Create option with empty vector of values.
+ boost::shared_ptr<OptionIntArray<T> >
+ opt(new OptionIntArray<T>(Option::V6, opt_code));
+ // Initialize vector with some data and pass to the option.
+ std::vector<T> values;
+ for (int i = 0; i < 10; ++i) {
+            values.push_back(numeric_limits<T>::max() - i);
+ }
+ opt->setValues(values);
+
+ // Check if universe, option type and data was set correctly.
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+ EXPECT_EQ(opt_code, opt->getType());
+ std::vector<T> returned_values = opt->getValues();
+ EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ }
+
+ /// @brief Test ability to add values one by one.
+ ///
+ /// @tparam T numeric type to perform the test for.
+ template<typename T>
+ void addValuesTest() {
+ const uint16_t opt_code = 100;
+ // Create option with empty vector of values.
+ boost::shared_ptr<OptionIntArray<T> >
+ opt(new OptionIntArray<T>(Option::V6, opt_code));
+ // Initialize vector with some data and add the same data
+ // to the option.
+ std::vector<T> values;
+ for (int i = 0; i < 10; ++i) {
+ values.push_back(numeric_limits<T>::max() - i);
+ opt->addValue(numeric_limits<T>::max() - i);
+ }
+
+ // Check if universe, option type and data was set correctly.
+ EXPECT_EQ(Option::V6, opt->getUniverse());
+ EXPECT_EQ(opt_code, opt->getType());
+ std::vector<T> returned_values = opt->getValues();
+ EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ }
OptionBuffer buf_; ///< Option buffer
OutputBuffer out_buf_; ///< Output buffer
@@ -371,118 +417,51 @@ TEST_F(OptionIntArrayTest, bufferToInt32V6) {
}
TEST_F(OptionIntArrayTest, setValuesUint8) {
- const uint16_t opt_code = 100;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<uint8_t> >
- opt(new OptionIntArray<uint8_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<uint8_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<uint8_t>::max() - i);
- }
- opt->setValues(values);
-
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<uint8_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ setValuesTest<uint8_t>();
}
TEST_F(OptionIntArrayTest, setValuesInt8) {
- const uint16_t opt_code = 100;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<int8_t> >
- opt(new OptionIntArray<int8_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<int8_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<int8_t>::min() + i);
- }
- opt->setValues(values);
-
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<int8_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ setValuesTest<int8_t>();
}
TEST_F(OptionIntArrayTest, setValuesUint16) {
- const uint16_t opt_code = 101;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<uint16_t> >
- opt(new OptionIntArray<uint16_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<uint16_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<uint16_t>::max() - i);
- }
- opt->setValues(values);
-
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<uint16_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ setValuesTest<uint16_t>();
}
TEST_F(OptionIntArrayTest, setValuesInt16) {
- const uint16_t opt_code = 101;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<int16_t> >
- opt(new OptionIntArray<int16_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<int16_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<int16_t>::min() + i);
- }
- opt->setValues(values);
-
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<int16_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+ setValuesTest<int16_t>();
}
TEST_F(OptionIntArrayTest, setValuesUint32) {
- const uint32_t opt_code = 101;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<uint32_t> >
- opt(new OptionIntArray<uint32_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<uint32_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<uint32_t>::max() - i);
- }
- opt->setValues(values);
-
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<uint32_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+    setValuesTest<uint32_t>();
}
TEST_F(OptionIntArrayTest, setValuesInt32) {
- const uint32_t opt_code = 101;
- // Create option with empty vector of values.
- boost::shared_ptr<OptionIntArray<int32_t> >
- opt(new OptionIntArray<int32_t>(Option::V6, opt_code));
- // Initialize vector with some data and pass to the option.
- std::vector<int32_t> values;
- for (int i = 0; i < 10; ++i) {
- values.push_back(numeric_limits<int32_t>::min() + i);
- }
- opt->setValues(values);
+    setValuesTest<int32_t>();
+}
- // Check if universe, option type and data was set correctly.
- EXPECT_EQ(Option::V6, opt->getUniverse());
- EXPECT_EQ(opt_code, opt->getType());
- std::vector<int32_t> returned_values = opt->getValues();
- EXPECT_TRUE(std::equal(values.begin(), values.end(), returned_values.begin()));
+TEST_F(OptionIntArrayTest, addValuesUint8) {
+ addValuesTest<uint8_t>();
}
+TEST_F(OptionIntArrayTest, addValuesInt8) {
+ addValuesTest<int8_t>();
+}
+
+TEST_F(OptionIntArrayTest, addValuesUint16) {
+ addValuesTest<uint16_t>();
+}
+
+TEST_F(OptionIntArrayTest, addValuesInt16) {
+ addValuesTest<int16_t>();
+}
+
+TEST_F(OptionIntArrayTest, addValuesUint32) {
+    addValuesTest<uint32_t>();
+}
+
+TEST_F(OptionIntArrayTest, addValuesInt32) {
+    addValuesTest<int32_t>();
+}
} // anonymous namespace
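The six per-type setValues tests (and the new addValues tests) now share two function templates, with each TEST_F reduced to a one-line call. As a hedged alternative, not what this branch does, googletest typed tests could drive the same body over every integer width from a single test definition (header paths assumed):

    #include <dhcp/option_int_array.h>
    #include <gtest/gtest.h>
    #include <boost/shared_ptr.hpp>
    #include <limits>
    #include <vector>

    using namespace isc::dhcp;

    template<typename T>
    class OptionIntArrayTypedTest : public ::testing::Test {};

    typedef ::testing::Types<uint8_t, int8_t, uint16_t, int16_t,
                             uint32_t, int32_t> IntegerTypes;
    TYPED_TEST_CASE(OptionIntArrayTypedTest, IntegerTypes);

    // One body, instantiated once per type in IntegerTypes.
    TYPED_TEST(OptionIntArrayTypedTest, setValues) {
        boost::shared_ptr<OptionIntArray<TypeParam> >
            opt(new OptionIntArray<TypeParam>(Option::V6, 100));
        std::vector<TypeParam> values;
        for (int i = 0; i < 10; ++i) {
            values.push_back(std::numeric_limits<TypeParam>::max() - i);
        }
        opt->setValues(values);
        EXPECT_TRUE(values == opt->getValues());
    }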
diff --git a/src/lib/dhcp/tests/option_space_unittest.cc b/src/lib/dhcp/tests/option_space_unittest.cc
new file mode 100644
index 0000000..d3a6f53
--- /dev/null
+++ b/src/lib/dhcp/tests/option_space_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <dhcp/option_space.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::dhcp;
+using namespace isc;
+
+namespace {
+
+// The purpose of this test is to verify that the constructor
+// creates an object with members initialized to correct values.
+TEST(OptionSpaceTest, constructor) {
+ // Create some option space.
+ OptionSpace space("isc", true);
+ EXPECT_EQ("isc", space.getName());
+ EXPECT_TRUE(space.isVendorSpace());
+
+ // Create another object with different values
+ // to check that the values will change.
+ OptionSpace space2("abc", false);
+ EXPECT_EQ("abc", space2.getName());
+ EXPECT_FALSE(space2.isVendorSpace());
+
+ // Verify that constructor throws exception if invalid
+ // option space name is provided.
+ EXPECT_THROW(OptionSpace("invalid%space.name"), InvalidOptionSpace);
+}
+
+// The purpose of this test is to verify that the vendor-space flag
+// can be overridden.
+TEST(OptionSpaceTest, setVendorSpace) {
+ OptionSpace space("isc", true);
+ EXPECT_EQ("isc", space.getName());
+ EXPECT_TRUE(space.isVendorSpace());
+
+ // Override the vendor space flag.
+ space.clearVendorSpace();
+ EXPECT_FALSE(space.isVendorSpace());
+}
+
+// The purpose of this test is to verify that the static function
+// to validate the option space name works correctly.
+TEST(OptionSpaceTest, validateName) {
+ // Positive test scenarios: letters, digits, dashes, underscores
+ // lower/upper case allowed.
+ EXPECT_TRUE(OptionSpace::validateName("abc"));
+ EXPECT_TRUE(OptionSpace::validateName("dash-allowed"));
+ EXPECT_TRUE(OptionSpace::validateName("two-dashes-allowed"));
+ EXPECT_TRUE(OptionSpace::validateName("underscore_allowed"));
+ EXPECT_TRUE(OptionSpace::validateName("underscore_three_times_allowed"));
+ EXPECT_TRUE(OptionSpace::validateName("digits0912"));
+ EXPECT_TRUE(OptionSpace::validateName("1234"));
+ EXPECT_TRUE(OptionSpace::validateName("UPPER_CASE_allowed"));
+
+    // Negative test scenarios: empty strings, dots, spaces are not
+ // allowed
+ EXPECT_FALSE(OptionSpace::validateName(""));
+ EXPECT_FALSE(OptionSpace::validateName(" "));
+ EXPECT_FALSE(OptionSpace::validateName(" isc "));
+ EXPECT_FALSE(OptionSpace::validateName("isc "));
+ EXPECT_FALSE(OptionSpace::validateName(" isc"));
+ EXPECT_FALSE(OptionSpace::validateName("isc with-space"));
+
+ // Hyphens and underscores are not allowed at the beginning
+ // and at the end of the option space name.
+ EXPECT_FALSE(OptionSpace::validateName("-isc"));
+ EXPECT_FALSE(OptionSpace::validateName("isc-"));
+ EXPECT_FALSE(OptionSpace::validateName("_isc"));
+ EXPECT_FALSE(OptionSpace::validateName("isc_"));
+
+ // Test other special characters
+ const char specials[] = { '!', '@', '#', '$', '%', '^', '&', '*', '(', ')',
+ '+', '=', '[', ']', '{', '}', ';', ':', '"', '\'',
+ '\\', '|', '<','>', ',', '.', '?', '~', '`' };
+ for (int i = 0; i < sizeof(specials); ++i) {
+ std::ostringstream stream;
+ // Concatenate valid option space name: "abc" with an invalid character.
+ // That way we get option space names like: "abc!", "abc$" etc. It is
+        // expected that the validating function fails for them.
+ stream << "abc" << specials[i];
+ EXPECT_FALSE(OptionSpace::validateName(stream.str()))
+ << "Test failed for special character '" << specials[i] << "'.";
+ }
+}
+
+// The purpose of this test is to verify that the constructors of the
+// OptionSpace6 class set the class members to correct values.
+TEST(OptionSpace6Test, constructor) {
+ // Create some option space and do not specify enterprise number.
+ // In such case the vendor space flag is expected to be
+ // set to false.
+ OptionSpace6 space1("abcd");
+ EXPECT_EQ("abcd", space1.getName());
+ EXPECT_FALSE(space1.isVendorSpace());
+
+ // Create an option space and specify an enterprise number. In this
+ // case the vendor space flag is expected to be set to true and the
+ // enterprise number should be set to a desired value.
+ OptionSpace6 space2("abcd", 2145);
+ EXPECT_EQ("abcd", space2.getName());
+ EXPECT_TRUE(space2.isVendorSpace());
+ EXPECT_EQ(2145, space2.getEnterpriseNumber());
+
+ // Verify that constructors throw an exception when invalid option
+ // space name has been specified.
+ EXPECT_THROW(OptionSpace6("isc dhcp"), InvalidOptionSpace);
+ EXPECT_THROW(OptionSpace6("isc%dhcp", 2145), InvalidOptionSpace);
+}
+
+// The purpose of this test is to verify an option space can be marked
+// vendor option space and enterprise number can be set.
+TEST(OptionSpace6Test, setVendorSpace) {
+ OptionSpace6 space("isc");
+ EXPECT_EQ("isc", space.getName());
+ EXPECT_FALSE(space.isVendorSpace());
+
+ // Mark it vendor option space and set enterprise id.
+ space.setVendorSpace(1234);
+ EXPECT_TRUE(space.isVendorSpace());
+ EXPECT_EQ(1234, space.getEnterpriseNumber());
+
+    // Override the enterprise number and make sure that
+ // the new number is returned by the object.
+ space.setVendorSpace(2345);
+ EXPECT_TRUE(space.isVendorSpace());
+ EXPECT_EQ(2345, space.getEnterpriseNumber());
+
+ // Clear the vendor option space flag.
+ space.clearVendorSpace();
+ EXPECT_FALSE(space.isVendorSpace());
+}
+
+
+}; // end of anonymous namespace
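The acceptance rule exercised by the validateName test above (letters and digits anywhere, '-' and '_' allowed only in the middle, empty names rejected) can also be written as a single regular expression. This is only an equivalent formulation for reference; the library itself uses boost string algorithms, as visible in the dhcpsrv copy being removed later in this diff:

    #include <boost/regex.hpp>
    #include <string>

    // Sketch only: same accept/reject behaviour as OptionSpace::validateName().
    bool
    validateNameSketch(const std::string& name) {
        static const boost::regex expr("[A-Za-z0-9]([A-Za-z0-9_-]*[A-Za-z0-9])?");
        return (boost::regex_match(name, expr));
    }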
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index afa64d5..1fc49f1 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -116,7 +116,7 @@ TEST_F(OptionTest, v4_data1) {
// now store that option into a buffer
OutputBuffer buf(100);
EXPECT_NO_THROW(
- opt->pack4(buf);
+ opt->pack(buf);
);
// check content of that buffer
@@ -173,7 +173,7 @@ TEST_F(OptionTest, v4_data2) {
// now store that option into a buffer
OutputBuffer buf(100);
EXPECT_NO_THROW(
- opt->pack4(buf);
+ opt->pack(buf);
);
// check content of that buffer
@@ -471,7 +471,7 @@ TEST_F(OptionTest, setUintX) {
// verify setUint8
opt1->setUint8(255);
EXPECT_EQ(255, opt1->getUint8());
- opt1->pack4(outBuf_);
+ opt1->pack(outBuf_);
EXPECT_EQ(3, opt1->len());
EXPECT_EQ(3, outBuf_.getLength());
uint8_t exp1[] = {125, 1, 255};
@@ -480,7 +480,7 @@ TEST_F(OptionTest, setUintX) {
// verify getUint16
outBuf_.clear();
opt2->setUint16(12345);
- opt2->pack4(outBuf_);
+ opt2->pack(outBuf_);
EXPECT_EQ(12345, opt2->getUint16());
EXPECT_EQ(4, opt2->len());
EXPECT_EQ(4, outBuf_.getLength());
@@ -490,7 +490,7 @@ TEST_F(OptionTest, setUintX) {
// verify getUint32
outBuf_.clear();
opt4->setUint32(0x12345678);
- opt4->pack4(outBuf_);
+ opt4->pack(outBuf_);
EXPECT_EQ(0x12345678, opt4->getUint32());
EXPECT_EQ(6, opt4->len());
EXPECT_EQ(6, outBuf_.getLength());
@@ -505,7 +505,7 @@ TEST_F(OptionTest, setData) {
buf_.begin(), buf_.begin() + 10));
buf_.resize(20, 1);
opt1->setData(buf_.begin(), buf_.end());
- opt1->pack4(outBuf_);
+ opt1->pack(outBuf_);
ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size());
const uint8_t* test_data = static_cast<const uint8_t*>(outBuf_.getData());
EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(),
@@ -518,7 +518,7 @@ TEST_F(OptionTest, setData) {
outBuf_.clear();
buf_.resize(5, 1);
opt2->setData(buf_.begin(), buf_.end());
- opt2->pack4(outBuf_);
+ opt2->pack(outBuf_);
ASSERT_EQ(outBuf_.getLength() - opt1->getHeaderLen(), buf_.size());
test_data = static_cast<const uint8_t*>(outBuf_.getData());
EXPECT_TRUE(0 == memcmp(&buf_[0], test_data + opt1->getHeaderLen(),
diff --git a/src/lib/dhcpsrv/Makefile.am b/src/lib/dhcpsrv/Makefile.am
index 0b02ef8..e721267 100644
--- a/src/lib/dhcpsrv/Makefile.am
+++ b/src/lib/dhcpsrv/Makefile.am
@@ -43,7 +43,6 @@ libb10_dhcpsrv_la_SOURCES += memfile_lease_mgr.cc memfile_lease_mgr.h
if HAVE_MYSQL
libb10_dhcpsrv_la_SOURCES += mysql_lease_mgr.cc mysql_lease_mgr.h
endif
-libb10_dhcpsrv_la_SOURCES += option_space.cc option_space.h
libb10_dhcpsrv_la_SOURCES += option_space_container.h
libb10_dhcpsrv_la_SOURCES += pool.cc pool.h
libb10_dhcpsrv_la_SOURCES += subnet.cc subnet.h
diff --git a/src/lib/dhcpsrv/alloc_engine.cc b/src/lib/dhcpsrv/alloc_engine.cc
index 7a64dac..3602aac 100644
--- a/src/lib/dhcpsrv/alloc_engine.cc
+++ b/src/lib/dhcpsrv/alloc_engine.cc
@@ -274,7 +274,7 @@ AllocEngine::allocateAddress4(const SubnetPtr& subnet,
}
// Check if there's existing lease for that subnet/clientid/hwaddr combination.
- Lease4Ptr existing = LeaseMgrFactory::instance().getLease4(hwaddr->hwaddr_, subnet->getID());
+ Lease4Ptr existing = LeaseMgrFactory::instance().getLease4(*hwaddr, subnet->getID());
if (existing) {
// We have a lease already. This is a returning client, probably after
// its reboot.
diff --git a/src/lib/dhcpsrv/alloc_engine.h b/src/lib/dhcpsrv/alloc_engine.h
index c6cbc35..7e3d136 100644
--- a/src/lib/dhcpsrv/alloc_engine.h
+++ b/src/lib/dhcpsrv/alloc_engine.h
@@ -214,8 +214,8 @@ protected:
/// @param clientid client identifier
/// @param hwaddr client's hardware address
/// @param lease lease to be renewed
- /// @param renewed lease (typically the same passed as lease parameter)
- /// or NULL if the lease cannot be renewed
+    /// @param fake_allocation false for a real allocation (REQUEST), true when
+    /// only picking an address for DISCOVER without actually allocating it
Lease4Ptr
renewLease4(const SubnetPtr& subnet,
const ClientIdPtr& clientid,
diff --git a/src/lib/dhcpsrv/cfgmgr.h b/src/lib/dhcpsrv/cfgmgr.h
index 0e56869..f4eecb5 100644
--- a/src/lib/dhcpsrv/cfgmgr.h
+++ b/src/lib/dhcpsrv/cfgmgr.h
@@ -18,7 +18,7 @@
#include <asiolink/io_address.h>
#include <dhcp/option.h>
#include <dhcp/option_definition.h>
-#include <dhcpsrv/option_space.h>
+#include <dhcp/option_space.h>
#include <dhcpsrv/option_space_container.h>
#include <dhcpsrv/pool.h>
#include <dhcpsrv/subnet.h>
diff --git a/src/lib/dhcpsrv/dbaccess_parser.cc b/src/lib/dhcpsrv/dbaccess_parser.cc
index c32284e..d29cf54 100644
--- a/src/lib/dhcpsrv/dbaccess_parser.cc
+++ b/src/lib/dhcpsrv/dbaccess_parser.cc
@@ -48,7 +48,7 @@ DbAccessParser::build(isc::data::ConstElementPtr config_value) {
// 4. If all is OK, update the stored keyword/value pairs.
// 1. Take a copy of the stored keyword/value pairs.
- map<string, string> values_copy = values_;
+ std::map<string, string> values_copy = values_;
// 2. Update the copy with the passed keywords.
BOOST_FOREACH(ConfigPair param, config_value->mapValue()) {
diff --git a/src/lib/dhcpsrv/lease_mgr.cc b/src/lib/dhcpsrv/lease_mgr.cc
index 6608b14..2310dd4 100644
--- a/src/lib/dhcpsrv/lease_mgr.cc
+++ b/src/lib/dhcpsrv/lease_mgr.cc
@@ -113,11 +113,22 @@ Lease4::toText() const {
bool
Lease4::operator==(const Lease4& other) const {
+ if ( (client_id_ && !other.client_id_) ||
+ (!client_id_ && other.client_id_) ) {
+ // One lease has client-id, but the other doesn't
+ return false;
+ }
+
+ if (client_id_ && other.client_id_ &&
+ *client_id_ != *other.client_id_) {
+ // Different client-ids
+ return false;
+ }
+
return (
addr_ == other.addr_ &&
ext_ == other.ext_ &&
hwaddr_ == other.hwaddr_ &&
- *client_id_ == *other.client_id_ &&
t1_ == other.t1_ &&
t2_ == other.t2_ &&
valid_lft_ == other.valid_lft_ &&
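The three client_id_ checks added above implement the usual null-safe comparison for shared pointers: two leases compare equal on this field when both client ids are unset, or when both are set and the pointed-to values are equal. The same rule as a reusable helper, shown only as a sketch (not part of the patch):

    #include <boost/shared_ptr.hpp>

    template<typename T>
    bool
    nullSafeEqual(const boost::shared_ptr<T>& a, const boost::shared_ptr<T>& b) {
        if (!a && !b) {
            return (true);      // both unset
        }
        if (!a || !b) {
            return (false);     // exactly one unset
        }
        return (*a == *b);      // both set: compare the pointed-to values
    }

With such a helper the body of Lease4::operator==() could begin with nullSafeEqual(client_id_, other.client_id_) instead of the explicit checks.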
diff --git a/src/lib/dhcpsrv/lease_mgr.h b/src/lib/dhcpsrv/lease_mgr.h
index f781576..e6aaa51 100644
--- a/src/lib/dhcpsrv/lease_mgr.h
+++ b/src/lib/dhcpsrv/lease_mgr.h
@@ -114,9 +114,18 @@ public:
/// leases.
struct Lease {
+ /// @brief Constructor
+ ///
+ /// @param addr IP address
+ /// @param t1 renewal time
+ /// @param t2 rebinding time
+ /// @param valid_lft Lifetime of the lease
+ /// @param subnet_id Subnet identification
+ /// @param cltt Client last transmission time
Lease(const isc::asiolink::IOAddress& addr, uint32_t t1, uint32_t t2,
uint32_t valid_lft, SubnetID subnet_id, time_t cltt);
+ /// @brief Destructor
virtual ~Lease() {}
     /// @brief IPv4 or IPv6 address
@@ -226,13 +235,14 @@ struct Lease4 : public Lease {
/// @brief Constructor
///
- /// @param addr IPv4 address as unsigned 32-bit integer in network byte
- /// order.
+ /// @param addr IPv4 address.
/// @param hwaddr Hardware address buffer
/// @param hwaddr_len Length of hardware address buffer
/// @param clientid Client identification buffer
/// @param clientid_len Length of client identification buffer
/// @param valid_lft Lifetime of the lease
+ /// @param t1 renewal time
+ /// @param t2 rebinding time
/// @param cltt Client last transmission time
/// @param subnet_id Subnet identification
Lease4(const isc::asiolink::IOAddress& addr, const uint8_t* hwaddr, size_t hwaddr_len,
diff --git a/src/lib/dhcpsrv/mysql_lease_mgr.cc b/src/lib/dhcpsrv/mysql_lease_mgr.cc
index 292df61..6b6cde5 100644
--- a/src/lib/dhcpsrv/mysql_lease_mgr.cc
+++ b/src/lib/dhcpsrv/mysql_lease_mgr.cc
@@ -338,12 +338,25 @@ public:
bind_[1].length = &hwaddr_length_;
// client_id: varbinary(128)
- client_id_ = lease_->client_id_->getClientId();
- client_id_length_ = client_id_.size();
- bind_[2].buffer_type = MYSQL_TYPE_BLOB;
- bind_[2].buffer = reinterpret_cast<char*>(&client_id_[0]);
- bind_[2].buffer_length = client_id_length_;
- bind_[2].length = &client_id_length_;
+ if (lease_->client_id_) {
+ client_id_ = lease_->client_id_->getClientId();
+ client_id_length_ = client_id_.size();
+ bind_[2].buffer_type = MYSQL_TYPE_BLOB;
+ bind_[2].buffer = reinterpret_cast<char*>(&client_id_[0]);
+ bind_[2].buffer_length = client_id_length_;
+ bind_[2].length = &client_id_length_;
+ } else {
+ bind_[2].buffer_type = MYSQL_TYPE_NULL;
+
+ // According to http://dev.mysql.com/doc/refman/5.5/en/
+ // c-api-prepared-statement-data-structures.html, the other
+                // fields don't matter if type is set to MYSQL_TYPE_NULL,
+ // but let's set them to some sane values in case earlier versions
+ // didn't have that assumption.
+ static my_bool no_clientid = MLM_TRUE;
+ bind_[2].buffer = NULL;
+ bind_[2].is_null = &no_clientid;
+ }
// valid lifetime: unsigned int
bind_[3].buffer_type = MYSQL_TYPE_LONG;
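The pattern above is the standard MySQL C API way to send a nullable parameter: switch the bind to MYSQL_TYPE_NULL and point is_null at a flag that stays alive until mysql_stmt_execute() runs (hence the static my_bool). A generic, self-contained sketch of the same idea, not project code:

    #include <mysql/mysql.h>
    #include <cstring>
    #include <stdint.h>
    #include <vector>

    // Bind one possibly-NULL varbinary parameter. 'length' and 'null_flag'
    // must outlive the later mysql_stmt_execute() call.
    void
    bindOptionalBlob(MYSQL_BIND& bind, std::vector<uint8_t>& data, bool present,
                     unsigned long& length, my_bool& null_flag) {
        std::memset(&bind, 0, sizeof(bind));
        if (present) {
            length = data.size();
            bind.buffer_type = MYSQL_TYPE_BLOB;
            bind.buffer = &data[0];
            bind.buffer_length = length;
            bind.length = &length;
        } else {
            // With MYSQL_TYPE_NULL the remaining members are ignored by the
            // client library; only the is_null flag matters.
            null_flag = 1;
            bind.buffer_type = MYSQL_TYPE_NULL;
            bind.is_null = &null_flag;
        }
    }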
diff --git a/src/lib/dhcpsrv/option_space.cc b/src/lib/dhcpsrv/option_space.cc
deleted file mode 100644
index 0e802a7..0000000
--- a/src/lib/dhcpsrv/option_space.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <dhcpsrv/option_space.h>
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/predicate.hpp>
-
-namespace isc {
-namespace dhcp {
-
-OptionSpace::OptionSpace(const std::string& name, const bool vendor_space)
- : name_(name), vendor_space_(vendor_space) {
- // Check that provided option space name is valid.
- if (!validateName(name_)) {
- isc_throw(InvalidOptionSpace, "Invalid option space name "
- << name_);
- }
-}
-
-bool
-OptionSpace::validateName(const std::string& name) {
-
- using namespace boost::algorithm;
-
- // Allowed characters are: lower or upper case letters, digits,
- // underscores and hyphens. Empty option spaces are not allowed.
- if (all(name, boost::is_from_range('a', 'z') ||
- boost::is_from_range('A', 'Z') ||
- boost::is_digit() ||
- boost::is_any_of(std::string("-_"))) &&
- !name.empty() &&
- // Hyphens and underscores are not allowed at the beginning
- // and at the end of the option space name.
- !all(find_head(name, 1), boost::is_any_of(std::string("-_"))) &&
- !all(find_tail(name, 1), boost::is_any_of(std::string("-_")))) {
- return (true);
-
- }
- return (false);
-}
-
-OptionSpace6::OptionSpace6(const std::string& name)
- : OptionSpace(name),
- enterprise_number_(0) {
-}
-
-OptionSpace6::OptionSpace6(const std::string& name,
- const uint32_t enterprise_number)
- : OptionSpace(name, true),
- enterprise_number_(enterprise_number) {
-}
-
-void
-OptionSpace6::setVendorSpace(const uint32_t enterprise_number) {
- enterprise_number_ = enterprise_number;
- OptionSpace::setVendorSpace();
-}
-
-} // end of isc::dhcp namespace
-} // end of isc namespace
diff --git a/src/lib/dhcpsrv/option_space.h b/src/lib/dhcpsrv/option_space.h
deleted file mode 100644
index 9eebd76..0000000
--- a/src/lib/dhcpsrv/option_space.h
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#ifndef OPTION_SPACE_H
-#define OPTION_SPACE_H
-
-#include <exceptions/exceptions.h>
-#include <boost/shared_ptr.hpp>
-#include <map>
-#include <stdint.h>
-#include <string>
-
-namespace isc {
-namespace dhcp {
-
-/// @brief Exception to be thrown when invalid option space
-/// is specified.
-class InvalidOptionSpace : public Exception {
-public:
- InvalidOptionSpace(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) { };
-};
-
-/// OptionSpace forward declaration.
-class OptionSpace;
-/// A pointer to OptionSpace object.
-typedef boost::shared_ptr<OptionSpace> OptionSpacePtr;
-/// A collection of option spaces.
-typedef std::map<std::string, OptionSpacePtr> OptionSpaceCollection;
-
-/// @brief DHCP option space.
-///
-/// This class represents single option space. The option spaces are used
-/// to group DHCP options having unique option codes. The special type
-/// of the option space is so called "vendor specific option space".
-/// It groups sub-options being sent within Vendor Encapsulated Options.
-/// For DHCPv4 it is the option with code 43. The option spaces are
-/// assigned to option instances represented by isc::dhcp::Option and
-/// other classes derived from it. Each particular option may belong to
-/// multiple option spaces.
-/// This class may be used to represent any DHCPv4 option space. If the
-/// option space is to group DHCPv4 Vendor Encapsulated Options then
-/// "vendor space" flag must be set using \ref OptionSpace::setVendorSpace
-/// or the argument passed to the constructor. In theory, this class can
-/// be also used to represent non-vendor specific DHCPv6 option space
-/// but this is discouraged. For DHCPv6 option spaces the OptionSpace6
-/// class should be used instead.
-///
-/// @note this class is intended to be used to represent DHCPv4 option
-/// spaces only. However, it hasn't been called OptionSpace4 (that would
-/// suggest that it is specific to DHCPv4) because it can be also
-/// used to represent some DHCPv6 option spaces and is a base class
-/// for \ref OptionSpace6. Thus, if one declared the container as follows:
-/// @code
-/// std::vector<OptionSpace4> container;
-/// @endcode
-/// it would suggest that the container holds DHCPv4 option spaces while
-/// it could hold both DHCPv4 and DHCPv6 option spaces as the OptionSpace6
-/// object could be upcast to OptionSpace4. This confusion does not appear
-/// when OptionSpace is used as a name for the base class.
-class OptionSpace {
-public:
-
- /// @brief Constructor.
- ///
- /// @param name option space name.
- /// @param vendor_space boolean value that indicates that the object
- /// describes the vendor specific option space.
- ///
- /// @throw isc::dhcp::InvalidOptionSpace if given option space name
- /// contains invalid characters or is empty. This constructor uses
- /// \ref validateName function to check that the specified name is
- /// correct.
- OptionSpace(const std::string& name, const bool vendor_space = false);
-
- /// @brief Return option space name.
- ///
- /// @return option space name.
- const std::string& getName() const { return (name_); }
-
- /// @brief Mark option space as non-vendor space.
- void clearVendorSpace() {
- vendor_space_ = false;
- }
-
- /// @brief Check if option space is vendor specific.
- ///
- /// @return boolean value that indicates if the object describes
- /// the vendor specific option space.
- bool isVendorSpace() const { return (vendor_space_); }
-
- /// @brief Mark option space as vendor specific.
- void setVendorSpace() {
- vendor_space_ = true;
- }
-
- /// @brief Checks that the provided option space name is valid.
- ///
- /// It is expected that option space name consists of upper or
- /// lower case letters or digits. Also, it may contain underscores
- /// or dashes. Other characters are prohibited. The empty option
- /// space names are invalid.
- ///
- /// @param name option space name to be validated.
- ///
- /// @return true if the option space is valid, else it returns false.
- static bool validateName(const std::string& name);
-
-private:
- std::string name_; ///< Holds option space name.
-
- bool vendor_space_; ///< Is this the vendor space?
-
-};
-
-/// @brief DHCPv6 option space with enterprise number assigned.
-///
-/// This class extends the base class with the support for enterprise numbers.
-/// The enterprise numbers are assigned by IANA to various organizations
-/// and they are carried as uint32_t integers in DHCPv6 Vendor Specific
-/// Information Options (VSIO). For more information refer to RFC3315.
-/// All option spaces that group VSIO options must have enterprise number
-/// set. It can be set using a constructor or \ref setVendorSpace function.
-/// The extra functionality of this class (enterprise numbers) allows to
-/// represent DHCPv6 vendor-specific option spaces but this class is also
-/// intended to be used for all other DHCPv6 option spaces. That way all
-/// DHCPv6 option spaces can be stored in the container holding OptionSpace6
-/// objects. Also, it is easy to mark vendor-specific option space as non-vendor
-/// specific option space (and the other way around) without a need to cast
-/// between OptionSpace and OptionSpace6 types.
-class OptionSpace6 : public OptionSpace {
-public:
-
- /// @brief Constructor for non-vendor-specific options.
- ///
- /// This constructor marks option space as non-vendor specific.
- ///
- /// @param name option space name.
- ///
- /// @throw isc::dhcp::InvalidOptionSpace if given option space name
- /// contains invalid characters or is empty. This constructor uses
- /// \ref OptionSpace::validateName function to check that the specified
- /// name is correct.
- OptionSpace6(const std::string& name);
-
- /// @brief Constructor for vendor-specific options.
- ///
- /// This constructor marks option space as vendor specific and sets
- /// enterprise number to a given value.
- ///
- /// @param name option space name.
- /// @param enterprise_number enterprise number.
- ///
- /// @throw isc::dhcp::InvalidOptionSpace if given option space name
- /// contains invalid characters or is empty. This constructor uses
- /// \ref OptionSpace::validateName function to check that the specified
- /// name is correct.
- OptionSpace6(const std::string& name, const uint32_t enterprise_number);
-
- /// @brief Return enterprise number for the option space.
- ///
- /// @return enterprise number.
- uint32_t getEnterpriseNumber() const { return (enterprise_number_); }
-
- /// @brief Mark option space as vendor specific.
- ///
- /// @param enterprise_number enterprise number.
- void setVendorSpace(const uint32_t enterprise_number);
-
-private:
-
- uint32_t enterprise_number_; ///< IANA assigned enterprise number.
-};
-
-} // namespace isc::dhcp
-} // namespace isc
-
-#endif // OPTION_SPACE_H
diff --git a/src/lib/dhcpsrv/option_space_container.h b/src/lib/dhcpsrv/option_space_container.h
index f90bedd..ba16fbb 100644
--- a/src/lib/dhcpsrv/option_space_container.h
+++ b/src/lib/dhcpsrv/option_space_container.h
@@ -41,7 +41,7 @@ public:
/// @brief Adds a new item to the option_space.
///
/// @param item reference to the item being added.
- /// @param name of the option space.
+ /// @param option_space name of the option space.
void addItem(const ItemType& item, const std::string& option_space) {
ItemsContainerPtr items = getItems(option_space);
items->push_back(item);
diff --git a/src/lib/dhcpsrv/subnet.cc b/src/lib/dhcpsrv/subnet.cc
index 0443a33..daf3f9e 100644
--- a/src/lib/dhcpsrv/subnet.cc
+++ b/src/lib/dhcpsrv/subnet.cc
@@ -13,6 +13,7 @@
// PERFORMANCE OF THIS SOFTWARE.
#include <asiolink/io_address.h>
+#include <dhcp/option_space.h>
#include <dhcpsrv/addr_utilities.h>
#include <dhcpsrv/subnet.h>
@@ -44,14 +45,12 @@ bool Subnet::inRange(const isc::asiolink::IOAddress& addr) const {
}
void
-Subnet::addOption(OptionPtr& option, bool persistent,
+Subnet::addOption(const OptionPtr& option, bool persistent,
const std::string& option_space) {
- // @todo Once the #2313 is merged we need to use the OptionSpace object to
- // validate the option space name here. For now, let's check that the name
- // is not empty as the empty namespace has a special meaning here - it is
- // returned when desired namespace is not found when getOptions is called.
- if (option_space.empty()) {
- isc_throw(isc::BadValue, "option space name must not be empty");
+ // Check that the option space name is valid.
+ if (!OptionSpace::validateName(option_space)) {
+ isc_throw(isc::BadValue, "invalid option space name: '"
+ << option_space << "'");
}
validateOption(option);
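With validateName() now guarding Subnet::addOption(), callers get an isc::BadValue for any malformed space name, not only for the empty one. A test-style sketch of the visible behaviour; the Subnet4 constructor arguments and header paths are assumptions here, not taken from this diff:

    #include <dhcpsrv/subnet.h>
    #include <dhcp/option.h>
    #include <exceptions/exceptions.h>
    #include <gtest/gtest.h>

    using namespace isc;
    using namespace isc::dhcp;

    TEST(SubnetAddOptionSketch, spaceNameIsValidated) {
        Subnet4 subnet(asiolink::IOAddress("192.0.2.0"), 24, 1000, 2000, 3000);
        OptionPtr opt(new Option(Option::V4, 128));

        // A well-formed space name is accepted ...
        EXPECT_NO_THROW(subnet.addOption(opt, false, "dhcp4"));
        // ... while malformed or empty names are rejected.
        EXPECT_THROW(subnet.addOption(opt, false, "invalid%space%name"), BadValue);
        EXPECT_THROW(subnet.addOption(opt, false, ""), BadValue);
    }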
diff --git a/src/lib/dhcpsrv/subnet.h b/src/lib/dhcpsrv/subnet.h
index 471fb03..cf29450 100644
--- a/src/lib/dhcpsrv/subnet.h
+++ b/src/lib/dhcpsrv/subnet.h
@@ -69,7 +69,7 @@ public:
///
/// @param opt option
/// @param persist if true option is always sent.
- OptionDescriptor(OptionPtr& opt, bool persist)
+ OptionDescriptor(const OptionPtr& opt, bool persist)
: option(opt), persistent(persist) {};
/// @brief Constructor
@@ -225,7 +225,7 @@ public:
/// @param option_space name of the option space to add an option to.
///
/// @throw isc::BadValue if invalid option provided.
- void addOption(OptionPtr& option, bool persistent,
+ void addOption(const OptionPtr& option, bool persistent,
const std::string& option_space);
/// @brief Delete all options configured for the subnet.
diff --git a/src/lib/dhcpsrv/tests/Makefile.am b/src/lib/dhcpsrv/tests/Makefile.am
index 00476c0..e19fd87 100644
--- a/src/lib/dhcpsrv/tests/Makefile.am
+++ b/src/lib/dhcpsrv/tests/Makefile.am
@@ -37,7 +37,6 @@ libdhcpsrv_unittests_SOURCES += memfile_lease_mgr_unittest.cc
if HAVE_MYSQL
libdhcpsrv_unittests_SOURCES += mysql_lease_mgr_unittest.cc
endif
-libdhcpsrv_unittests_SOURCES += option_space_unittest.cc
libdhcpsrv_unittests_SOURCES += pool_unittest.cc
libdhcpsrv_unittests_SOURCES += schema_copy.h
libdhcpsrv_unittests_SOURCES += subnet_unittest.cc
diff --git a/src/lib/dhcpsrv/tests/dbaccess_parser_unittest.cc b/src/lib/dhcpsrv/tests/dbaccess_parser_unittest.cc
index 05c0743..24cfb1a 100644
--- a/src/lib/dhcpsrv/tests/dbaccess_parser_unittest.cc
+++ b/src/lib/dhcpsrv/tests/dbaccess_parser_unittest.cc
@@ -120,7 +120,7 @@ public:
SCOPED_TRACE(trace_string);
// Construct a map of keyword value pairs.
- map<string, string> expected;
+ std::map<string, string> expected;
size_t expected_count = 0;
for (size_t i = 0; keyval[i] != NULL; i += 2) {
// Get the value. This should not be NULL
@@ -147,7 +147,7 @@ public:
actual != parameters.end(); ++actual) {
// Does the keyword exist in the set of expected keywords?
- map<string, string>::iterator corresponding =
+ std::map<string, string>::iterator corresponding =
expected.find(actual->first);
ASSERT_TRUE(corresponding != expected.end());
diff --git a/src/lib/dhcpsrv/tests/option_space_unittest.cc b/src/lib/dhcpsrv/tests/option_space_unittest.cc
deleted file mode 100644
index f8d75c8..0000000
--- a/src/lib/dhcpsrv/tests/option_space_unittest.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright (C) 2012, 2013 Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <config.h>
-
-#include <dhcpsrv/option_space.h>
-
-#include <gtest/gtest.h>
-
-using namespace isc::dhcp;
-using namespace isc;
-
-namespace {
-
-// The purpose of this test is to verify that the constructor
-// creates an object with members initialized to correct values.
-TEST(OptionSpaceTest, constructor) {
- // Create some option space.
- OptionSpace space("isc", true);
- EXPECT_EQ("isc", space.getName());
- EXPECT_TRUE(space.isVendorSpace());
-
- // Create another object with different values
- // to check that the values will change.
- OptionSpace space2("abc", false);
- EXPECT_EQ("abc", space2.getName());
- EXPECT_FALSE(space2.isVendorSpace());
-
- // Verify that constructor throws exception if invalid
- // option space name is provided.
- EXPECT_THROW(OptionSpace("invalid%space.name"), InvalidOptionSpace);
-}
-
-// The purpose of this test is to verify that the vendor-space flag
-// can be overriden.
-TEST(OptionSpaceTest, setVendorSpace) {
- OptionSpace space("isc", true);
- EXPECT_EQ("isc", space.getName());
- EXPECT_TRUE(space.isVendorSpace());
-
- // Override the vendor space flag.
- space.clearVendorSpace();
- EXPECT_FALSE(space.isVendorSpace());
-}
-
-// The purpose of this test is to verify that the static function
-// to validate the option space name works correctly.
-TEST(OptionSpaceTest, validateName) {
- // Positive test scenarios: letters, digits, dashes, underscores
- // lower/upper case allowed.
- EXPECT_TRUE(OptionSpace::validateName("abc"));
- EXPECT_TRUE(OptionSpace::validateName("dash-allowed"));
- EXPECT_TRUE(OptionSpace::validateName("two-dashes-allowed"));
- EXPECT_TRUE(OptionSpace::validateName("underscore_allowed"));
- EXPECT_TRUE(OptionSpace::validateName("underscore_three_times_allowed"));
- EXPECT_TRUE(OptionSpace::validateName("digits0912"));
- EXPECT_TRUE(OptionSpace::validateName("1234"));
- EXPECT_TRUE(OptionSpace::validateName("UPPER_CASE_allowed"));
-
- // Negative test scenarions: empty strings, dots, spaces are not
- // allowed
- EXPECT_FALSE(OptionSpace::validateName(""));
- EXPECT_FALSE(OptionSpace::validateName(" "));
- EXPECT_FALSE(OptionSpace::validateName(" isc "));
- EXPECT_FALSE(OptionSpace::validateName("isc "));
- EXPECT_FALSE(OptionSpace::validateName(" isc"));
- EXPECT_FALSE(OptionSpace::validateName("isc with-space"));
-
- // Hyphens and underscores are not allowed at the beginning
- // and at the end of the option space name.
- EXPECT_FALSE(OptionSpace::validateName("-isc"));
- EXPECT_FALSE(OptionSpace::validateName("isc-"));
- EXPECT_FALSE(OptionSpace::validateName("_isc"));
- EXPECT_FALSE(OptionSpace::validateName("isc_"));
-
- // Test other special characters
- const char specials[] = { '!', '@', '#', '$', '%', '^', '&', '*', '(', ')',
- '+', '=', '[', ']', '{', '}', ';', ':', '"', '\'',
- '\\', '|', '<','>', ',', '.', '?', '~', '`' };
- for (int i = 0; i < sizeof(specials); ++i) {
- std::ostringstream stream;
- // Concatenate valid option space name: "abc" with an invalid character.
- // That way we get option space names like: "abc!", "abc$" etc. It is
- // expected that the validating function fails form them.
- stream << "abc" << specials[i];
- EXPECT_FALSE(OptionSpace::validateName(stream.str()))
- << "Test failed for special character '" << specials[i] << "'.";
- }
-}
-
-// The purpose of this test is to verify that the constructors of the
-// OptionSpace6 class set the class members to correct values.
-TEST(OptionSpace6Test, constructor) {
- // Create some option space and do not specify enterprise number.
- // In such case the vendor space flag is expected to be
- // set to false.
- OptionSpace6 space1("abcd");
- EXPECT_EQ("abcd", space1.getName());
- EXPECT_FALSE(space1.isVendorSpace());
-
- // Create an option space and specify an enterprise number. In this
- // case the vendor space flag is expected to be set to true and the
- // enterprise number should be set to a desired value.
- OptionSpace6 space2("abcd", 2145);
- EXPECT_EQ("abcd", space2.getName());
- EXPECT_TRUE(space2.isVendorSpace());
- EXPECT_EQ(2145, space2.getEnterpriseNumber());
-
- // Verify that constructors throw an exception when invalid option
- // space name has been specified.
- EXPECT_THROW(OptionSpace6("isc dhcp"), InvalidOptionSpace);
- EXPECT_THROW(OptionSpace6("isc%dhcp", 2145), InvalidOptionSpace);
-}
-
-// The purpose of this test is to verify an option space can be marked
-// vendor option space and enterprise number can be set.
-TEST(OptionSpace6Test, setVendorSpace) {
- OptionSpace6 space("isc");
- EXPECT_EQ("isc", space.getName());
- EXPECT_FALSE(space.isVendorSpace());
-
- // Mark it vendor option space and set enterprise id.
- space.setVendorSpace(1234);
- EXPECT_TRUE(space.isVendorSpace());
- EXPECT_EQ(1234, space.getEnterpriseNumber());
-
- // Override the enterprise number to make sure and make sure that
- // the new number is returned by the object.
- space.setVendorSpace(2345);
- EXPECT_TRUE(space.isVendorSpace());
- EXPECT_EQ(2345, space.getEnterpriseNumber());
-
- // Clear the vendor option space flag.
- space.clearVendorSpace();
- EXPECT_FALSE(space.isVendorSpace());
-}
-
-
-}; // end of anonymous namespace
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 286bd8c..bbf33ed 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -8,6 +8,10 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
CLEANFILES = *.gcno *.gcda
CLEANFILES += rrclass.h rrtype.h rrparamregistry.cc rdataclass.h rdataclass.cc
+# These two are created together with rrtype.h/rrclass.h, so they are not
+# explicitly listed in BUILT_SOURCES.
+CLEANFILES += python/rrtype_constants_inc.cc
+CLEANFILES += python/rrclass_constants_inc.cc
EXTRA_DIST = rrclass-placeholder.h
EXTRA_DIST += rrparamregistry-placeholder.cc
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index fc63d73..b385bf4 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -33,16 +33,43 @@ import sys
# Example:
# new_rdata_factory_users = [('a', 'in'), ('a', 'ch'), ('soa', 'generic')]
new_rdata_factory_users = [('aaaa', 'in'),
+ ('cname', 'generic'),
+ ('dname', 'generic'),
('hinfo', 'generic'),
('naptr', 'generic'),
+ ('mx', 'generic'),
+ ('ns', 'generic'),
+ ('ptr', 'generic'),
('soa', 'generic'),
('spf', 'generic'),
+ ('srv', 'in'),
('txt', 'generic')
]
-re_typecode = re.compile('([\da-z]+)_(\d+)')
+re_typecode = re.compile('([\da-z\-]+)_(\d+)')
classcode2txt = {}
typecode2txt = {}
+# For meta types and types well-known but not implemented. This is a dict from
+# type code values (as string) to textual mnemonic.
+meta_types = {
+ # Real meta types. We won't have Rdata implement for them, but we need
+ # RRType constants.
+ '251': 'ixfr', '252': 'axfr', '255': 'any',
+    # Obsolete types. We probably won't implement Rdata for them, but it's
+ # better to have RRType constants.
+ '3': 'md', '4': 'mf', '7': 'mb', '8': 'mg', '9': 'mr', '30': 'nxt',
+ '38': 'a6', '254': 'maila',
+ # Types officially assigned but not yet supported in our implementation.
+ '10': 'null', '11': 'wks', '19': 'x25', '21': 'rt', '22': 'nsap',
+ '23': 'nsap-ptr', '24': 'sig', '20': 'isdn', '25': 'key', '26': 'px',
+ '27': 'gpos', '29': 'loc', '36': 'kx', '37': 'cert', '42': 'apl',
+ '45': 'ipseckey', '52': 'tlsa', '55': 'hip', '103': 'unspec',
+ '104': 'nid', '105': 'l32', '106': 'l64', '107': 'lp', '249': 'tkey',
+ '253': 'mailb', '256': 'uri', '257': 'caa'
+ }
+# Classes that don't have any known types. This is a dict from type code
+# values (as string) to textual mnemonic.
+meta_classes = {'254': 'none'}
typeandclass = []
generic_code = 65536 # something larger than any code value
rdata_declarations = ''
@@ -185,11 +212,11 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
type_code = m.group(2)
if not type_code in typecode2txt:
typecode2txt[type_code] = type_txt
- if re.search('\cc$', file):
+ if re.search('\.cc$', file):
if rdatadef_mtime < getmtime(file):
rdatadef_mtime = getmtime(file)
class_definitions += import_classdef(class_txt, file)
- elif re.search('\h$', file):
+ elif re.search('\.h$', file):
if rdatahdr_mtime < getmtime(file):
rdatahdr_mtime = getmtime(file)
rdata_declarations += import_classheader(class_txt,
@@ -255,36 +282,66 @@ class MasterLoaderCallbacks;
def generate_typeclasscode(fileprefix, basemtime, code2txt, type_or_class):
placeholder = '@srcdir@/' + fileprefix + '-placeholder.h'
outputfile = '@builddir@/' + fileprefix + '.h'
+ py_outputfile = '@builddir@/python/' + fileprefix + '_constants_inc.cc'
upper_key = type_or_class.upper() # TYPE or CLASS
lower_key = 'rr' + type_or_class.lower() # rrtype or rrclass
cap_key = type_or_class # Type or Class
- if not need_generate(outputfile, basemtime) and getmtime(outputfile) > getmtime(placeholder):
+ # We only check whether regeneration is needed for the libdns++ files;
+ # Python files are generated if and only if the libdns++ files are.
+ # In practice it should be sufficient.
+ if (not need_generate(outputfile, basemtime) and
+ getmtime(outputfile) > getmtime(placeholder)):
print('skip generating ' + outputfile)
return
- declarationtxt = ''
- deftxt = ''
- for code in code2txt.keys():
- codetxt = code2txt[code].upper()
- declarationtxt += ' ' * 4 + 'static const RR' + cap_key + '& ' + codetxt + '();\n'
- deftxt += '''inline const RR''' + cap_key + '''&
-RR''' + cap_key + '''::''' + codetxt + '''() {
- static RR''' + cap_key + ''' ''' + lower_key + '''(''' + code + ''');
+ # Create a list of (code, code-text) pairs, where code-text is generally
+ # upper-cased, applying special filters when necessary.
+ def convert(code_txt):
+ # Workaround by heuristics: there's a "NULL" RR type, but it would
+ # cause conflict with the C/C++ macro. We use Null as a special case.
+ if code_txt == 'null':
+ return 'Null'
+ # Likewise, convert "nsap-ptr" to "NSAP_PTR" as a dash cannot be part
+ # of a C/C++ variable.
+ if code_txt == 'nsap-ptr':
+ return 'NSAP_PTR'
+ return code_txt.upper()
+ codes = [ (code, convert(txt)) for code, txt in code2txt.items() ]
+
+ # Dump source code for libdns++
+ with open(placeholder, 'r') as header_temp:
+ with open(outputfile, 'w') as header_out:
+ header_out.write(heading_txt)
+ for line in header_temp:
+ header_out.write(line)
+ if re.match('\s+// BEGIN_WELL_KNOWN_' + upper_key +
+ '_DECLARATIONS$', line):
+ for code in codes:
+ header_out.write(' ' * 4 + 'static const RR' +
+ cap_key + '& ' + code[1] + '();\n')
+ if re.match('// BEGIN_WELL_KNOWN_' + upper_key +
+ '_DEFINITIONS$', line):
+ for code in codes:
+ header_out.write('''inline const RR''' + cap_key +
+ '''&
+RR''' + cap_key + '''::''' + code[1] + '''() {
+ static RR''' + cap_key + ''' ''' + lower_key + '''(''' + code[0] + ''');
return (''' + lower_key + ''');
}\n
-'''
- header_temp = open(placeholder, 'r')
- header_out = open(outputfile, 'w')
- header_out.write(heading_txt)
- for line in header_temp.readlines():
- header_out.write(line)
- if re.match('\s+// BEGIN_WELL_KNOWN_' + upper_key + '_DECLARATIONS$', line):
- header_out.write(declarationtxt)
- if re.match('// BEGIN_WELL_KNOWN_' + upper_key + '_DEFINITIONS$', line):
- header_out.write('\n' + deftxt)
- header_out.close()
- header_temp.close()
+''')
+
+ # Dump source code snippet for isc.dns Python module
+ with open(py_outputfile, 'w') as py_out:
+ py_out.write(" // auto-generated by ../gen-rdatacode.py."
+ " Don't edit this file.\n")
+ py_out.write("\n")
+ for code in codes:
+ py_out.write('''\
+ installClassVariable(''' + lower_key + '''_type, "''' + code[1] + '''",
+ createRR''' + cap_key + '''Object(RR''' + \
+ cap_key + '''::''' + code[1] + '''()));
+''')
def generate_rrparam(fileprefix, basemtime):
placeholder = '@srcdir@/' + fileprefix + '-placeholder.cc'
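
The name-mangling rule applied by convert() above can be exercised on its own;
a minimal sketch that mirrors the two special cases:

    def convert(code_txt):
        # "null" would collide with the C/C++ NULL macro, and a dash cannot
        # appear in a C/C++ identifier, so both are special-cased before
        # upper-casing.
        if code_txt == 'null':
            return 'Null'
        if code_txt == 'nsap-ptr':
            return 'NSAP_PTR'
        return code_txt.upper()

    print([convert(t) for t in ('a', 'null', 'nsap-ptr')])
    # ['A', 'Null', 'NSAP_PTR']
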
@@ -331,6 +388,16 @@ def generate_rrparam(fileprefix, basemtime):
typeandclassparams += ', RdataFactoryPtr(new ' + rdf_class + '<'
typeandclassparams += class_txt + '::' + type_utxt + '>()));\n'
+ typeandclassparams += indent + '// Meta and non-implemented RR types\n'
+ for type_code, type_txt in meta_types.items():
+ typeandclassparams += indent + \
+ 'addType("' + type_txt.upper() + '", ' + type_code + ');\n'
+
+ typeandclassparams += indent + '// Meta classes\n'
+ for cls_code, cls_txt in meta_classes.items():
+ typeandclassparams += indent + \
+ 'addClass("' + cls_txt.upper() + '", ' + cls_code + ');\n'
+
rrparam_temp = open(placeholder, 'r')
rrparam_out = open(outputfile, 'w')
rrparam_out.write(heading_txt)
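
To show the shape of the registry code this loop emits, a small standalone
sketch (toy subset of meta_types, hypothetical indentation):

    # Toy values only, to illustrate the generated C++ registration calls
    # appended to rrparamregistry.cc.
    meta_types = {'255': 'any', '252': 'axfr'}
    indent = ' ' * 4

    snippet = indent + '// Meta and non-implemented RR types\n'
    for type_code, type_txt in meta_types.items():
        snippet += indent + 'addType("' + type_txt.upper() + '", ' + \
            type_code + ');\n'
    print(snippet)
    # e.g.:
    #     // Meta and non-implemented RR types
    #     addType("ANY", 255);
    #     addType("AXFR", 252);
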
@@ -347,9 +414,14 @@ if __name__ == "__main__":
generate_rdatadef('@builddir@/rdataclass.cc', rdatadef_mtime)
generate_rdatahdr('@builddir@/rdataclass.h', heading_txt,
rdata_declarations, rdatahdr_mtime)
- generate_typeclasscode('rrtype', rdatahdr_mtime, typecode2txt, 'Type')
+
+ # merge auto-generated types/classes with meta maps and generate the
+ # corresponding code.
+ generate_typeclasscode('rrtype', rdatahdr_mtime,
+ dict(typecode2txt, **meta_types), 'Type')
generate_typeclasscode('rrclass', classdir_mtime,
- classcode2txt, 'Class')
+ dict(classcode2txt, **meta_classes), 'Class')
+
generate_rrparam('rrparamregistry', rdatahdr_mtime)
except:
sys.stderr.write('Code generation failed due to exception: %s\n' %
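
The dict(a, **b) idiom used above merges the two maps before generation; a toy
illustration with made-up values:

    # dict(a, **b) builds a new dict with b's entries layered on top of a's,
    # which is how the implemented types and the meta/unsupported types are
    # combined before the RRType constants are generated.
    typecode2txt = {'1': 'a', '28': 'aaaa'}
    meta_types = {'252': 'axfr', '255': 'any'}

    merged = dict(typecode2txt, **meta_types)
    print(sorted(merged.items(), key=lambda kv: int(kv[0])))
    # [('1', 'a'), ('28', 'aaaa'), ('252', 'axfr'), ('255', 'any')]
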
diff --git a/src/lib/dns/name.cc b/src/lib/dns/name.cc
index 079033a..ff00374 100644
--- a/src/lib/dns/name.cc
+++ b/src/lib/dns/name.cc
@@ -227,7 +227,6 @@ stringParse(Iterator s, Iterator send, bool downcase, Offsets& offsets,
isc_throw(BadLabelType,
"invalid label type in " << string(orig_s, send));
}
- state = ft_escape;
// FALLTHROUGH
case ft_escape:
if (!isdigit(c & 0xff)) {
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 50436a9..8d40d9d 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -43,62 +43,12 @@ void Opcode_destroy(s_Opcode* const self);
PyObject* Opcode_getCode(const s_Opcode* const self);
PyObject* Opcode_toText(const s_Opcode* const self);
PyObject* Opcode_str(PyObject* self);
-PyObject* Opcode_QUERY(const s_Opcode* self);
-PyObject* Opcode_IQUERY(const s_Opcode* self);
-PyObject* Opcode_STATUS(const s_Opcode* self);
-PyObject* Opcode_RESERVED3(const s_Opcode* self);
-PyObject* Opcode_NOTIFY(const s_Opcode* self);
-PyObject* Opcode_UPDATE(const s_Opcode* self);
-PyObject* Opcode_RESERVED6(const s_Opcode* self);
-PyObject* Opcode_RESERVED7(const s_Opcode* self);
-PyObject* Opcode_RESERVED8(const s_Opcode* self);
-PyObject* Opcode_RESERVED9(const s_Opcode* self);
-PyObject* Opcode_RESERVED10(const s_Opcode* self);
-PyObject* Opcode_RESERVED11(const s_Opcode* self);
-PyObject* Opcode_RESERVED12(const s_Opcode* self);
-PyObject* Opcode_RESERVED13(const s_Opcode* self);
-PyObject* Opcode_RESERVED14(const s_Opcode* self);
-PyObject* Opcode_RESERVED15(const s_Opcode* self);
-PyObject* Opcode_richcmp(const s_Opcode* const self,
- const s_Opcode* const other, int op);
PyMethodDef Opcode_methods[] = {
{ "get_code", reinterpret_cast<PyCFunction>(Opcode_getCode), METH_NOARGS,
"Returns the code value" },
{ "to_text", reinterpret_cast<PyCFunction>(Opcode_toText), METH_NOARGS,
"Returns the text representation" },
- { "QUERY", reinterpret_cast<PyCFunction>(Opcode_QUERY),
- METH_NOARGS | METH_STATIC, "Creates a QUERY Opcode" },
- { "IQUERY", reinterpret_cast<PyCFunction>(Opcode_IQUERY),
- METH_NOARGS | METH_STATIC, "Creates a IQUERY Opcode" },
- { "STATUS", reinterpret_cast<PyCFunction>(Opcode_STATUS),
- METH_NOARGS | METH_STATIC, "Creates a STATUS Opcode" },
- { "RESERVED3", reinterpret_cast<PyCFunction>(Opcode_RESERVED3),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED3 Opcode" },
- { "NOTIFY", reinterpret_cast<PyCFunction>(Opcode_NOTIFY),
- METH_NOARGS | METH_STATIC, "Creates a NOTIFY Opcode" },
- { "UPDATE", reinterpret_cast<PyCFunction>(Opcode_UPDATE),
- METH_NOARGS | METH_STATIC, "Creates a UPDATE Opcode" },
- { "RESERVED6", reinterpret_cast<PyCFunction>(Opcode_RESERVED6),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED6 Opcode" },
- { "RESERVED7", reinterpret_cast<PyCFunction>(Opcode_RESERVED7),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED7 Opcode" },
- { "RESERVED8", reinterpret_cast<PyCFunction>(Opcode_RESERVED8),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED8 Opcode" },
- { "RESERVED9", reinterpret_cast<PyCFunction>(Opcode_RESERVED9),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED9 Opcode" },
- { "RESERVED10", reinterpret_cast<PyCFunction>(Opcode_RESERVED10),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED10 Opcode" },
- { "RESERVED11", reinterpret_cast<PyCFunction>(Opcode_RESERVED11),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED11 Opcode" },
- { "RESERVED12", reinterpret_cast<PyCFunction>(Opcode_RESERVED12),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED12 Opcode" },
- { "RESERVED13", reinterpret_cast<PyCFunction>(Opcode_RESERVED13),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED13 Opcode" },
- { "RESERVED14", reinterpret_cast<PyCFunction>(Opcode_RESERVED14),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED14 Opcode" },
- { "RESERVED15", reinterpret_cast<PyCFunction>(Opcode_RESERVED15),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED15 Opcode" },
{ NULL, NULL, 0, NULL }
};
@@ -156,96 +106,6 @@ Opcode_str(PyObject* self) {
}
PyObject*
-Opcode_createStatic(const Opcode& opcode) {
- s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
- if (ret != NULL) {
- ret->cppobj = &opcode;
- ret->static_code = true;
- }
- return (ret);
-}
-
-PyObject*
-Opcode_QUERY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::QUERY()));
-}
-
-PyObject*
-Opcode_IQUERY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::IQUERY()));
-}
-
-PyObject*
-Opcode_STATUS(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::STATUS()));
-}
-
-PyObject*
-Opcode_RESERVED3(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED3()));
-}
-
-PyObject*
-Opcode_NOTIFY(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::NOTIFY()));
-}
-
-PyObject*
-Opcode_UPDATE(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::UPDATE()));
-}
-
-PyObject*
-Opcode_RESERVED6(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED6()));
-}
-
-PyObject*
-Opcode_RESERVED7(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED7()));
-}
-
-PyObject*
-Opcode_RESERVED8(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED8()));
-}
-
-PyObject*
-Opcode_RESERVED9(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED9()));
-}
-
-PyObject*
-Opcode_RESERVED10(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED10()));
-}
-
-PyObject*
-Opcode_RESERVED11(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED11()));
-}
-
-PyObject*
-Opcode_RESERVED12(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED12()));
-}
-
-PyObject*
-Opcode_RESERVED13(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED13()));
-}
-
-PyObject*
-Opcode_RESERVED14(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED14()));
-}
-
-PyObject*
-Opcode_RESERVED15(const s_Opcode*) {
- return (Opcode_createStatic(Opcode::RESERVED15()));
-}
-
-PyObject*
Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
const int op)
{
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index c75c737..30dc090 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -294,38 +294,83 @@ initModulePart_Opcode(PyObject* mod) {
return (false);
}
- addClassVariable(opcode_type, "QUERY_CODE",
- Py_BuildValue("h", Opcode::QUERY_CODE));
- addClassVariable(opcode_type, "IQUERY_CODE",
- Py_BuildValue("h", Opcode::IQUERY_CODE));
- addClassVariable(opcode_type, "STATUS_CODE",
- Py_BuildValue("h", Opcode::STATUS_CODE));
- addClassVariable(opcode_type, "RESERVED3_CODE",
- Py_BuildValue("h", Opcode::RESERVED3_CODE));
- addClassVariable(opcode_type, "NOTIFY_CODE",
- Py_BuildValue("h", Opcode::NOTIFY_CODE));
- addClassVariable(opcode_type, "UPDATE_CODE",
- Py_BuildValue("h", Opcode::UPDATE_CODE));
- addClassVariable(opcode_type, "RESERVED6_CODE",
- Py_BuildValue("h", Opcode::RESERVED6_CODE));
- addClassVariable(opcode_type, "RESERVED7_CODE",
- Py_BuildValue("h", Opcode::RESERVED7_CODE));
- addClassVariable(opcode_type, "RESERVED8_CODE",
- Py_BuildValue("h", Opcode::RESERVED8_CODE));
- addClassVariable(opcode_type, "RESERVED9_CODE",
- Py_BuildValue("h", Opcode::RESERVED9_CODE));
- addClassVariable(opcode_type, "RESERVED10_CODE",
- Py_BuildValue("h", Opcode::RESERVED10_CODE));
- addClassVariable(opcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Opcode::RESERVED11_CODE));
- addClassVariable(opcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Opcode::RESERVED12_CODE));
- addClassVariable(opcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Opcode::RESERVED13_CODE));
- addClassVariable(opcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Opcode::RESERVED14_CODE));
- addClassVariable(opcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Opcode::RESERVED15_CODE));
+ try {
+ installClassVariable(opcode_type, "QUERY_CODE",
+ Py_BuildValue("h", Opcode::QUERY_CODE));
+ installClassVariable(opcode_type, "IQUERY_CODE",
+ Py_BuildValue("h", Opcode::IQUERY_CODE));
+ installClassVariable(opcode_type, "STATUS_CODE",
+ Py_BuildValue("h", Opcode::STATUS_CODE));
+ installClassVariable(opcode_type, "RESERVED3_CODE",
+ Py_BuildValue("h", Opcode::RESERVED3_CODE));
+ installClassVariable(opcode_type, "NOTIFY_CODE",
+ Py_BuildValue("h", Opcode::NOTIFY_CODE));
+ installClassVariable(opcode_type, "UPDATE_CODE",
+ Py_BuildValue("h", Opcode::UPDATE_CODE));
+ installClassVariable(opcode_type, "RESERVED6_CODE",
+ Py_BuildValue("h", Opcode::RESERVED6_CODE));
+ installClassVariable(opcode_type, "RESERVED7_CODE",
+ Py_BuildValue("h", Opcode::RESERVED7_CODE));
+ installClassVariable(opcode_type, "RESERVED8_CODE",
+ Py_BuildValue("h", Opcode::RESERVED8_CODE));
+ installClassVariable(opcode_type, "RESERVED9_CODE",
+ Py_BuildValue("h", Opcode::RESERVED9_CODE));
+ installClassVariable(opcode_type, "RESERVED10_CODE",
+ Py_BuildValue("h", Opcode::RESERVED10_CODE));
+ installClassVariable(opcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Opcode::RESERVED11_CODE));
+ installClassVariable(opcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Opcode::RESERVED12_CODE));
+ installClassVariable(opcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Opcode::RESERVED13_CODE));
+ installClassVariable(opcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Opcode::RESERVED14_CODE));
+ installClassVariable(opcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+ installClassVariable(opcode_type, "QUERY",
+ createOpcodeObject(Opcode::QUERY()));
+ installClassVariable(opcode_type, "IQUERY",
+ createOpcodeObject(Opcode::IQUERY()));
+ installClassVariable(opcode_type, "STATUS",
+ createOpcodeObject(Opcode::STATUS()));
+ installClassVariable(opcode_type, "RESERVED3",
+ createOpcodeObject(Opcode::RESERVED3()));
+ installClassVariable(opcode_type, "NOTIFY",
+ createOpcodeObject(Opcode::NOTIFY()));
+ installClassVariable(opcode_type, "UPDATE",
+ createOpcodeObject(Opcode::UPDATE()));
+ installClassVariable(opcode_type, "RESERVED6",
+ createOpcodeObject(Opcode::RESERVED6()));
+ installClassVariable(opcode_type, "RESERVED7",
+ createOpcodeObject(Opcode::RESERVED7()));
+ installClassVariable(opcode_type, "RESERVED8",
+ createOpcodeObject(Opcode::RESERVED8()));
+ installClassVariable(opcode_type, "RESERVED9",
+ createOpcodeObject(Opcode::RESERVED9()));
+ installClassVariable(opcode_type, "RESERVED10",
+ createOpcodeObject(Opcode::RESERVED10()));
+ installClassVariable(opcode_type, "RESERVED11",
+ createOpcodeObject(Opcode::RESERVED11()));
+ installClassVariable(opcode_type, "RESERVED12",
+ createOpcodeObject(Opcode::RESERVED12()));
+ installClassVariable(opcode_type, "RESERVED13",
+ createOpcodeObject(Opcode::RESERVED13()));
+ installClassVariable(opcode_type, "RESERVED14",
+ createOpcodeObject(Opcode::RESERVED14()));
+ installClassVariable(opcode_type, "RESERVED15",
+ createOpcodeObject(Opcode::RESERVED15()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Opcode initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Opcode initialization");
+ return (false);
+ }
return (true);
}
@@ -341,40 +386,87 @@ initModulePart_Rcode(PyObject* mod) {
return (false);
}
- addClassVariable(rcode_type, "NOERROR_CODE",
- Py_BuildValue("h", Rcode::NOERROR_CODE));
- addClassVariable(rcode_type, "FORMERR_CODE",
- Py_BuildValue("h", Rcode::FORMERR_CODE));
- addClassVariable(rcode_type, "SERVFAIL_CODE",
- Py_BuildValue("h", Rcode::SERVFAIL_CODE));
- addClassVariable(rcode_type, "NXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
- addClassVariable(rcode_type, "NOTIMP_CODE",
- Py_BuildValue("h", Rcode::NOTIMP_CODE));
- addClassVariable(rcode_type, "REFUSED_CODE",
- Py_BuildValue("h", Rcode::REFUSED_CODE));
- addClassVariable(rcode_type, "YXDOMAIN_CODE",
- Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
- addClassVariable(rcode_type, "YXRRSET_CODE",
- Py_BuildValue("h", Rcode::YXRRSET_CODE));
- addClassVariable(rcode_type, "NXRRSET_CODE",
- Py_BuildValue("h", Rcode::NXRRSET_CODE));
- addClassVariable(rcode_type, "NOTAUTH_CODE",
- Py_BuildValue("h", Rcode::NOTAUTH_CODE));
- addClassVariable(rcode_type, "NOTZONE_CODE",
- Py_BuildValue("h", Rcode::NOTZONE_CODE));
- addClassVariable(rcode_type, "RESERVED11_CODE",
- Py_BuildValue("h", Rcode::RESERVED11_CODE));
- addClassVariable(rcode_type, "RESERVED12_CODE",
- Py_BuildValue("h", Rcode::RESERVED12_CODE));
- addClassVariable(rcode_type, "RESERVED13_CODE",
- Py_BuildValue("h", Rcode::RESERVED13_CODE));
- addClassVariable(rcode_type, "RESERVED14_CODE",
- Py_BuildValue("h", Rcode::RESERVED14_CODE));
- addClassVariable(rcode_type, "RESERVED15_CODE",
- Py_BuildValue("h", Rcode::RESERVED15_CODE));
- addClassVariable(rcode_type, "BADVERS_CODE",
- Py_BuildValue("h", Rcode::BADVERS_CODE));
+ try {
+ installClassVariable(rcode_type, "NOERROR_CODE",
+ Py_BuildValue("h", Rcode::NOERROR_CODE));
+ installClassVariable(rcode_type, "FORMERR_CODE",
+ Py_BuildValue("h", Rcode::FORMERR_CODE));
+ installClassVariable(rcode_type, "SERVFAIL_CODE",
+ Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+ installClassVariable(rcode_type, "NXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+ installClassVariable(rcode_type, "NOTIMP_CODE",
+ Py_BuildValue("h", Rcode::NOTIMP_CODE));
+ installClassVariable(rcode_type, "REFUSED_CODE",
+ Py_BuildValue("h", Rcode::REFUSED_CODE));
+ installClassVariable(rcode_type, "YXDOMAIN_CODE",
+ Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+ installClassVariable(rcode_type, "YXRRSET_CODE",
+ Py_BuildValue("h", Rcode::YXRRSET_CODE));
+ installClassVariable(rcode_type, "NXRRSET_CODE",
+ Py_BuildValue("h", Rcode::NXRRSET_CODE));
+ installClassVariable(rcode_type, "NOTAUTH_CODE",
+ Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+ installClassVariable(rcode_type, "NOTZONE_CODE",
+ Py_BuildValue("h", Rcode::NOTZONE_CODE));
+ installClassVariable(rcode_type, "RESERVED11_CODE",
+ Py_BuildValue("h", Rcode::RESERVED11_CODE));
+ installClassVariable(rcode_type, "RESERVED12_CODE",
+ Py_BuildValue("h", Rcode::RESERVED12_CODE));
+ installClassVariable(rcode_type, "RESERVED13_CODE",
+ Py_BuildValue("h", Rcode::RESERVED13_CODE));
+ installClassVariable(rcode_type, "RESERVED14_CODE",
+ Py_BuildValue("h", Rcode::RESERVED14_CODE));
+ installClassVariable(rcode_type, "RESERVED15_CODE",
+ Py_BuildValue("h", Rcode::RESERVED15_CODE));
+ installClassVariable(rcode_type, "BADVERS_CODE",
+ Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+ installClassVariable(rcode_type, "NOERROR",
+ createRcodeObject(Rcode::NOERROR()));
+ installClassVariable(rcode_type, "FORMERR",
+ createRcodeObject(Rcode::FORMERR()));
+ installClassVariable(rcode_type, "SERVFAIL",
+ createRcodeObject(Rcode::SERVFAIL()));
+ installClassVariable(rcode_type, "NXDOMAIN",
+ createRcodeObject(Rcode::NXDOMAIN()));
+ installClassVariable(rcode_type, "NOTIMP",
+ createRcodeObject(Rcode::NOTIMP()));
+ installClassVariable(rcode_type, "REFUSED",
+ createRcodeObject(Rcode::REFUSED()));
+ installClassVariable(rcode_type, "YXDOMAIN",
+ createRcodeObject(Rcode::YXDOMAIN()));
+ installClassVariable(rcode_type, "YXRRSET",
+ createRcodeObject(Rcode::YXRRSET()));
+ installClassVariable(rcode_type, "NXRRSET",
+ createRcodeObject(Rcode::NXRRSET()));
+ installClassVariable(rcode_type, "NOTAUTH",
+ createRcodeObject(Rcode::NOTAUTH()));
+ installClassVariable(rcode_type, "NOTZONE",
+ createRcodeObject(Rcode::NOTZONE()));
+ installClassVariable(rcode_type, "RESERVED11",
+ createRcodeObject(Rcode::RESERVED11()));
+ installClassVariable(rcode_type, "RESERVED12",
+ createRcodeObject(Rcode::RESERVED12()));
+ installClassVariable(rcode_type, "RESERVED13",
+ createRcodeObject(Rcode::RESERVED13()));
+ installClassVariable(rcode_type, "RESERVED14",
+ createRcodeObject(Rcode::RESERVED14()));
+ installClassVariable(rcode_type, "RESERVED15",
+ createRcodeObject(Rcode::RESERVED15()));
+ installClassVariable(rcode_type, "BADVERS",
+ createRcodeObject(Rcode::BADVERS()));
+ } catch (const std::exception& ex) {
+ const std::string ex_what =
+ "Unexpected failure in Rcode initialization: " +
+ std::string(ex.what());
+ PyErr_SetString(po_IscException, ex_what.c_str());
+ return (false);
+ } catch (...) {
+ PyErr_SetString(PyExc_SystemError,
+ "Unexpected failure in Rcode initialization");
+ return (false);
+ }
return (true);
}
@@ -432,6 +524,9 @@ initModulePart_RRClass(PyObject* mod) {
NULL, NULL);
PyObjectContainer(po_IncompleteRRClass).installToModule(
mod, "IncompleteRRClass");
+
+ // Incorporate auto-generated RRClass constants
+#include <dns/python/rrclass_constants_inc.cc>
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in RRClass initialization: " +
@@ -518,6 +613,9 @@ initModulePart_RRType(PyObject* mod) {
NULL, NULL);
PyObjectContainer(po_IncompleteRRType).installToModule(
mod, "IncompleteRRType");
+
+ // Incorporate auto-generated RRType constants
+#include <dns/python/rrtype_constants_inc.cc>
} catch (const std::exception& ex) {
const std::string ex_what =
"Unexpected failure in RRType initialization: " +
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index 42b48e7..67b45e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -55,23 +55,6 @@ PyObject* Rcode_getCode(const s_Rcode* const self);
PyObject* Rcode_getExtendedCode(const s_Rcode* const self);
PyObject* Rcode_toText(const s_Rcode* const self);
PyObject* Rcode_str(PyObject* self);
-PyObject* Rcode_NOERROR(const s_Rcode* self);
-PyObject* Rcode_FORMERR(const s_Rcode* self);
-PyObject* Rcode_SERVFAIL(const s_Rcode* self);
-PyObject* Rcode_NXDOMAIN(const s_Rcode* self);
-PyObject* Rcode_NOTIMP(const s_Rcode* self);
-PyObject* Rcode_REFUSED(const s_Rcode* self);
-PyObject* Rcode_YXDOMAIN(const s_Rcode* self);
-PyObject* Rcode_YXRRSET(const s_Rcode* self);
-PyObject* Rcode_NXRRSET(const s_Rcode* self);
-PyObject* Rcode_NOTAUTH(const s_Rcode* self);
-PyObject* Rcode_NOTZONE(const s_Rcode* self);
-PyObject* Rcode_RESERVED11(const s_Rcode* self);
-PyObject* Rcode_RESERVED12(const s_Rcode* self);
-PyObject* Rcode_RESERVED13(const s_Rcode* self);
-PyObject* Rcode_RESERVED14(const s_Rcode* self);
-PyObject* Rcode_RESERVED15(const s_Rcode* self);
-PyObject* Rcode_BADVERS(const s_Rcode* self);
PyObject* Rcode_richcmp(const s_Rcode* const self,
const s_Rcode* const other, int op);
@@ -83,40 +66,6 @@ PyMethodDef Rcode_methods[] = {
"Returns the upper 8-bit part of the extended code value" },
{ "to_text", reinterpret_cast<PyCFunction>(Rcode_toText), METH_NOARGS,
"Returns the text representation" },
- { "NOERROR", reinterpret_cast<PyCFunction>(Rcode_NOERROR),
- METH_NOARGS | METH_STATIC, "Creates a NOERROR Rcode" },
- { "FORMERR", reinterpret_cast<PyCFunction>(Rcode_FORMERR),
- METH_NOARGS | METH_STATIC, "Creates a FORMERR Rcode" },
- { "SERVFAIL", reinterpret_cast<PyCFunction>(Rcode_SERVFAIL),
- METH_NOARGS | METH_STATIC, "Creates a SERVFAIL Rcode" },
- { "NXDOMAIN", reinterpret_cast<PyCFunction>(Rcode_NXDOMAIN),
- METH_NOARGS | METH_STATIC, "Creates a NXDOMAIN Rcode" },
- { "NOTIMP", reinterpret_cast<PyCFunction>(Rcode_NOTIMP),
- METH_NOARGS | METH_STATIC, "Creates a NOTIMP Rcode" },
- { "REFUSED", reinterpret_cast<PyCFunction>(Rcode_REFUSED),
- METH_NOARGS | METH_STATIC, "Creates a REFUSED Rcode" },
- { "YXDOMAIN", reinterpret_cast<PyCFunction>(Rcode_YXDOMAIN),
- METH_NOARGS | METH_STATIC, "Creates a YXDOMAIN Rcode" },
- { "YXRRSET", reinterpret_cast<PyCFunction>(Rcode_YXRRSET),
- METH_NOARGS | METH_STATIC, "Creates a YYRRSET Rcode" },
- { "NXRRSET", reinterpret_cast<PyCFunction>(Rcode_NXRRSET),
- METH_NOARGS | METH_STATIC, "Creates a NXRRSET Rcode" },
- { "NOTAUTH", reinterpret_cast<PyCFunction>(Rcode_NOTAUTH),
- METH_NOARGS | METH_STATIC, "Creates a NOTAUTH Rcode" },
- { "NOTZONE", reinterpret_cast<PyCFunction>(Rcode_NOTZONE),
- METH_NOARGS | METH_STATIC, "Creates a NOTZONE Rcode" },
- { "RESERVED11", reinterpret_cast<PyCFunction>(Rcode_RESERVED11),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED11 Rcode" },
- { "RESERVED12", reinterpret_cast<PyCFunction>(Rcode_RESERVED12),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED12 Rcode" },
- { "RESERVED13", reinterpret_cast<PyCFunction>(Rcode_RESERVED13),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED13 Rcode" },
- { "RESERVED14", reinterpret_cast<PyCFunction>(Rcode_RESERVED14),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED14 Rcode" },
- { "RESERVED15", reinterpret_cast<PyCFunction>(Rcode_RESERVED15),
- METH_NOARGS | METH_STATIC, "Creates a RESERVED15 Rcode" },
- { "BADVERS", reinterpret_cast<PyCFunction>(Rcode_BADVERS),
- METH_NOARGS | METH_STATIC, "Creates a BADVERS Rcode" },
{ NULL, NULL, 0, NULL }
};
@@ -193,101 +142,6 @@ Rcode_str(PyObject* self) {
}
PyObject*
-Rcode_createStatic(const Rcode& rcode) {
- s_Rcode* ret = PyObject_New(s_Rcode, &rcode_type);
- if (ret != NULL) {
- ret->cppobj = &rcode;
- ret->static_code = true;
- }
- return (ret);
-}
-
-PyObject*
-Rcode_NOERROR(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOERROR()));
-}
-
-PyObject*
-Rcode_FORMERR(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::FORMERR()));
-}
-
-PyObject*
-Rcode_SERVFAIL(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::SERVFAIL()));
-}
-
-PyObject*
-Rcode_NXDOMAIN(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NXDOMAIN()));
-}
-
-PyObject*
-Rcode_NOTIMP(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTIMP()));
-}
-
-PyObject*
-Rcode_REFUSED(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::REFUSED()));
-}
-
-PyObject*
-Rcode_YXDOMAIN(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::YXDOMAIN()));
-}
-
-PyObject*
-Rcode_YXRRSET(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::YXRRSET()));
-}
-
-PyObject*
-Rcode_NXRRSET(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NXRRSET()));
-}
-
-PyObject*
-Rcode_NOTAUTH(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTAUTH()));
-}
-
-PyObject*
-Rcode_NOTZONE(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::NOTZONE()));
-}
-
-PyObject*
-Rcode_RESERVED11(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED11()));
-}
-
-PyObject*
-Rcode_RESERVED12(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED12()));
-}
-
-PyObject*
-Rcode_RESERVED13(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED13()));
-}
-
-PyObject*
-Rcode_RESERVED14(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED14()));
-}
-
-PyObject*
-Rcode_RESERVED15(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::RESERVED15()));
-}
-
-PyObject*
-Rcode_BADVERS(const s_Rcode*) {
- return (Rcode_createStatic(Rcode::BADVERS()));
-}
-
-PyObject*
Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
const int op)
{
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index a566f47..d62c88d 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -54,13 +54,6 @@ PyObject* RRClass_getCode(s_RRClass* self);
PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
Py_hash_t RRClass_hash(PyObject* pyself);
-// Static function for direct class creation
-PyObject* RRClass_IN(s_RRClass *self);
-PyObject* RRClass_CH(s_RRClass *self);
-PyObject* RRClass_HS(s_RRClass *self);
-PyObject* RRClass_NONE(s_RRClass *self);
-PyObject* RRClass_ANY(s_RRClass *self);
-
typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
// This list contains the actual set of functions we have in
@@ -81,11 +74,6 @@ PyMethodDef RRClass_methods[] = {
"returned" },
{ "get_code", reinterpret_cast<PyCFunction>(RRClass_getCode), METH_NOARGS,
"Returns the class code as an integer" },
- { "IN", reinterpret_cast<PyCFunction>(RRClass_IN), METH_NOARGS | METH_STATIC, "Creates an IN RRClass" },
- { "CH", reinterpret_cast<PyCFunction>(RRClass_CH), METH_NOARGS | METH_STATIC, "Creates a CH RRClass" },
- { "HS", reinterpret_cast<PyCFunction>(RRClass_HS), METH_NOARGS | METH_STATIC, "Creates an HS RRClass" },
- { "NONE", reinterpret_cast<PyCFunction>(RRClass_NONE), METH_NOARGS | METH_STATIC, "Creates a NONE RRClass" },
- { "ANY", reinterpret_cast<PyCFunction>(RRClass_ANY), METH_NOARGS | METH_STATIC, "Creates an ANY RRClass" },
{ NULL, NULL, 0, NULL }
};
@@ -234,37 +222,6 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
Py_RETURN_FALSE;
}
-//
-// Common function for RRClass_IN/CH/etc.
-//
-PyObject* RRClass_createStatic(RRClass stc) {
- s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
- if (ret != NULL) {
- ret->cppobj = new RRClass(stc);
- }
- return (ret);
-}
-
-PyObject* RRClass_IN(s_RRClass*) {
- return (RRClass_createStatic(RRClass::IN()));
-}
-
-PyObject* RRClass_CH(s_RRClass*) {
- return (RRClass_createStatic(RRClass::CH()));
-}
-
-PyObject* RRClass_HS(s_RRClass*) {
- return (RRClass_createStatic(RRClass::HS()));
-}
-
-PyObject* RRClass_NONE(s_RRClass*) {
- return (RRClass_createStatic(RRClass::NONE()));
-}
-
-PyObject* RRClass_ANY(s_RRClass*) {
- return (RRClass_createStatic(RRClass::ANY()));
-}
-
Py_hash_t
RRClass_hash(PyObject* pyself) {
const s_RRClass* const self = static_cast<s_RRClass*>(pyself);
diff --git a/src/lib/dns/python/rrset_collection_python_inc.cc b/src/lib/dns/python/rrset_collection_python_inc.cc
index f6eb8a3..baf8ec8 100644
--- a/src/lib/dns/python/rrset_collection_python_inc.cc
+++ b/src/lib/dns/python/rrset_collection_python_inc.cc
@@ -36,6 +36,41 @@ Returns the RRset in the collection that exactly matches the given\n\
name, rrclass and rrtype. If no matching RRset is found, None is\n\
returned.\n\
\n\
+This method's implementations are currently not specified to handle\n\
+RRTypes such as RRSIG and NSEC3. This interface may be refined in the\n\
+future to clarify this point and perhaps provide additional APIs for\n\
+these RRTypes.\n\
+\n\
+As for RRSIG, there are some fundamental open questions. For example,\n\
+it's not clear whether we want to return all RRSIGs of the given name\n\
+covering any RR types (in which case, we need to figure out how), or\n\
+we need to extend the interface so we can specify the covered type. A\n\
+specific derived implementation may return something if type RRSIG is\n\
+specified, but this is not specified here at the base class level. So,\n\
+for RRSIGs the behavior should be assumed to be undefined.\n\
+\n\
+As for NSEC3, it's not clear whether owner names (which include\n\
+hashed labels) are the best choice of search key, because in many\n\
+cases, what the application wants to find is an NSEC3 that has the\n\
+hash of some particular \"normal\" domain name. Also, if the\n\
+underlying implementation encapsulates a single zone, NSEC3 records\n\
+conceptually belong to a separate name space, which may cause\n\
+implementation difficulty.\n\
+\n\
+Behavior with meta types such as ANY and AXFR is also undefined. A\n\
+specific implementation may return something for these. But, unlike\n\
+the case of RRSIGs, these types of RRsets are not expected to be added\n\
+to any implementation of collection in the first place (by the\n\
+definition of \"meta types\"), so querying for such types is\n\
+basically an invalid operation. The API doesn't require\n\
+implementations to check this condition and reject it, so the behavior\n\
+is undefined. This interface will not be refined in future versions\n\
+for these meta types.\n\
+\n\
+Exceptions:\n\
+ RRsetCollectionError if find() results in some implementation-\n\
+ specific error.\n\
+\n\
Parameters:\n\
name (isc.dns.Name) The name of the RRset to search for.\n\
rrtype (isc.dns.RRType) The type of the RRset to search for.\n\
@@ -121,7 +156,7 @@ find(name, rrclass, rrtype) -> isc.dns.RRset\n\
Find a matching RRset in the collection.\n\
\n\
Returns the RRset in the collection that exactly matches the given\n\
-name, rrclass and rrtype. If no matching RRset is found, NULL is\n\
+name, rrclass and rrtype. If no matching RRset is found, None is\n\
returned.\n\
\n\
Parameters:\n\
@@ -129,7 +164,7 @@ Parameters:\n\
rrclass The class of the RRset to search for.\n\
rrtype The type of the RRset to search for.\n\
\n\
-Return Value(s): The RRset if found, NULL otherwise.\n\
+Return Value(s): The RRset if found, None otherwise.\n\
";
// Modifications
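
A usage sketch of the documented find() behavior; 'collection' here is
hypothetical and stands for any concrete RRsetCollection populated elsewhere
(e.g. from a zone file):

    from pydnspp import Name, RRClass, RRType

    # Hypothetical: 'collection' is a concrete RRsetCollection implementation
    # built elsewhere; find() returns the matching RRset or None.
    found = collection.find(Name("www.example.org"), RRClass.IN, RRType.A)
    if found is None:
        print("no matching RRset")
    else:
        print(found.to_text())
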
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 97b66d4..bf705cc 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -50,25 +50,6 @@ PyObject* RRType_toWire(s_RRType* self, PyObject* args);
PyObject* RRType_getCode(s_RRType* self);
PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
Py_hash_t RRType_hash(PyObject* pyself);
-PyObject* RRType_NSEC3PARAM(s_RRType *self);
-PyObject* RRType_DNAME(s_RRType *self);
-PyObject* RRType_PTR(s_RRType *self);
-PyObject* RRType_MX(s_RRType *self);
-PyObject* RRType_DNSKEY(s_RRType *self);
-PyObject* RRType_TXT(s_RRType *self);
-PyObject* RRType_RRSIG(s_RRType *self);
-PyObject* RRType_NSEC(s_RRType *self);
-PyObject* RRType_AAAA(s_RRType *self);
-PyObject* RRType_DS(s_RRType *self);
-PyObject* RRType_OPT(s_RRType *self);
-PyObject* RRType_A(s_RRType *self);
-PyObject* RRType_NS(s_RRType *self);
-PyObject* RRType_CNAME(s_RRType *self);
-PyObject* RRType_SOA(s_RRType *self);
-PyObject* RRType_NSEC3(s_RRType *self);
-PyObject* RRType_IXFR(s_RRType *self);
-PyObject* RRType_AXFR(s_RRType *self);
-PyObject* RRType_ANY(s_RRType *self);
typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
@@ -90,25 +71,6 @@ PyMethodDef RRType_methods[] = {
"returned" },
{ "get_code", reinterpret_cast<PyCFunction>(RRType_getCode), METH_NOARGS,
"Returns the type code as an integer" },
- { "NSEC3PARAM", reinterpret_cast<PyCFunction>(RRType_NSEC3PARAM), METH_NOARGS | METH_STATIC, "Creates an NSEC3PARAM RRType" },
- { "DNAME", reinterpret_cast<PyCFunction>(RRType_DNAME), METH_NOARGS | METH_STATIC, "Creates a DNAME RRType" },
- { "PTR", reinterpret_cast<PyCFunction>(RRType_PTR), METH_NOARGS | METH_STATIC, "Creates a PTR RRType" },
- { "MX", reinterpret_cast<PyCFunction>(RRType_MX), METH_NOARGS | METH_STATIC, "Creates an MX RRType" },
- { "DNSKEY", reinterpret_cast<PyCFunction>(RRType_DNSKEY), METH_NOARGS | METH_STATIC, "Creates a DNSKEY RRType" },
- { "TXT", reinterpret_cast<PyCFunction>(RRType_TXT), METH_NOARGS | METH_STATIC, "Creates a TXT RRType" },
- { "RRSIG", reinterpret_cast<PyCFunction>(RRType_RRSIG), METH_NOARGS | METH_STATIC, "Creates a RRSIG RRType" },
- { "NSEC", reinterpret_cast<PyCFunction>(RRType_NSEC), METH_NOARGS | METH_STATIC, "Creates a NSEC RRType" },
- { "AAAA", reinterpret_cast<PyCFunction>(RRType_AAAA), METH_NOARGS | METH_STATIC, "Creates an AAAA RRType" },
- { "DS", reinterpret_cast<PyCFunction>(RRType_DS), METH_NOARGS | METH_STATIC, "Creates a DS RRType" },
- { "OPT", reinterpret_cast<PyCFunction>(RRType_OPT), METH_NOARGS | METH_STATIC, "Creates an OPT RRType" },
- { "A", reinterpret_cast<PyCFunction>(RRType_A), METH_NOARGS | METH_STATIC, "Creates an A RRType" },
- { "NS", reinterpret_cast<PyCFunction>(RRType_NS), METH_NOARGS | METH_STATIC, "Creates an NS RRType" },
- { "CNAME", reinterpret_cast<PyCFunction>(RRType_CNAME), METH_NOARGS | METH_STATIC, "Creates a CNAME RRType" },
- { "SOA", reinterpret_cast<PyCFunction>(RRType_SOA), METH_NOARGS | METH_STATIC, "Creates a SOA RRType" },
- { "NSEC3", reinterpret_cast<PyCFunction>(RRType_NSEC3), METH_NOARGS | METH_STATIC, "Creates an NSEC3 RRType" },
- { "IXFR", reinterpret_cast<PyCFunction>(RRType_IXFR), METH_NOARGS | METH_STATIC, "Creates an IXFR RRType" },
- { "AXFR", reinterpret_cast<PyCFunction>(RRType_AXFR), METH_NOARGS | METH_STATIC, "Creates an AXFR RRType" },
- { "ANY", reinterpret_cast<PyCFunction>(RRType_ANY), METH_NOARGS | METH_STATIC, "Creates an ANY RRType" },
{ NULL, NULL, 0, NULL }
};
@@ -263,112 +225,6 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
Py_RETURN_FALSE;
}
-//
-// Common function for RRType_A/NS/etc.
-//
-PyObject* RRType_createStatic(RRType stc) {
- s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
- if (ret != NULL) {
- ret->cppobj = new RRType(stc);
- }
- return (ret);
-}
-
-PyObject*
-RRType_NSEC3PARAM(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC3PARAM()));
-}
-
-PyObject*
-RRType_DNAME(s_RRType*) {
- return (RRType_createStatic(RRType::DNAME()));
-}
-
-PyObject*
-RRType_PTR(s_RRType*) {
- return (RRType_createStatic(RRType::PTR()));
-}
-
-PyObject*
-RRType_MX(s_RRType*) {
- return (RRType_createStatic(RRType::MX()));
-}
-
-PyObject*
-RRType_DNSKEY(s_RRType*) {
- return (RRType_createStatic(RRType::DNSKEY()));
-}
-
-PyObject*
-RRType_TXT(s_RRType*) {
- return (RRType_createStatic(RRType::TXT()));
-}
-
-PyObject*
-RRType_RRSIG(s_RRType*) {
- return (RRType_createStatic(RRType::RRSIG()));
-}
-
-PyObject*
-RRType_NSEC(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC()));
-}
-
-PyObject*
-RRType_AAAA(s_RRType*) {
- return (RRType_createStatic(RRType::AAAA()));
-}
-
-PyObject*
-RRType_DS(s_RRType*) {
- return (RRType_createStatic(RRType::DS()));
-}
-
-PyObject*
-RRType_OPT(s_RRType*) {
- return (RRType_createStatic(RRType::OPT()));
-}
-
-PyObject*
-RRType_A(s_RRType*) {
- return (RRType_createStatic(RRType::A()));
-}
-
-PyObject*
-RRType_NS(s_RRType*) {
- return (RRType_createStatic(RRType::NS()));
-}
-
-PyObject*
-RRType_CNAME(s_RRType*) {
- return (RRType_createStatic(RRType::CNAME()));
-}
-
-PyObject*
-RRType_SOA(s_RRType*) {
- return (RRType_createStatic(RRType::SOA()));
-}
-
-PyObject*
-RRType_NSEC3(s_RRType*) {
- return (RRType_createStatic(RRType::NSEC3()));
-}
-
-PyObject*
-RRType_IXFR(s_RRType*) {
- return (RRType_createStatic(RRType::IXFR()));
-}
-
-PyObject*
-RRType_AXFR(s_RRType*) {
- return (RRType_createStatic(RRType::AXFR()));
-}
-
-PyObject*
-RRType_ANY(s_RRType*) {
- return (RRType_createStatic(RRType::ANY()));
-}
-
Py_hash_t
RRType_hash(PyObject* pyself) {
const s_RRType* const self = static_cast<s_RRType*>(pyself);
diff --git a/src/lib/dns/python/tests/edns_python_test.py b/src/lib/dns/python/tests/edns_python_test.py
index b249213..150dfd6 100644
--- a/src/lib/dns/python/tests/edns_python_test.py
+++ b/src/lib/dns/python/tests/edns_python_test.py
@@ -108,8 +108,8 @@ class EDNSTest(unittest.TestCase):
def test_towire_renderer(self):
renderer = MessageRenderer()
- extrcode_noerror = Rcode.NOERROR().get_extended_code()
- extrcode_badvers = Rcode.BADVERS().get_extended_code()
+ extrcode_noerror = Rcode.NOERROR.get_extended_code()
+ extrcode_badvers = Rcode.BADVERS.get_extended_code()
self.assertEqual(1, self.edns_base.to_wire(renderer, extrcode_noerror))
wiredata = read_wire_data("edns_toWire1.wire")
@@ -148,7 +148,7 @@ class EDNSTest(unittest.TestCase):
self.assertEqual(0, renderer.get_length())
def test_towire_buffer(self):
- extrcode_noerror = Rcode.NOERROR().get_extended_code()
+ extrcode_noerror = Rcode.NOERROR.get_extended_code()
obuffer = bytes()
obuffer = self.edns_base.to_wire(obuffer, extrcode_noerror)
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index b9c0d5c..bf39a83 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -59,8 +59,8 @@ LONG_TXT4 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012
def create_message():
message_render = Message(Message.RENDER)
message_render.set_qid(0x1035)
- message_render.set_opcode(Opcode.QUERY())
- message_render.set_rcode(Rcode.NOERROR())
+ message_render.set_opcode(Opcode.QUERY)
+ message_render.set_rcode(Rcode.NOERROR)
message_render.set_header_flag(Message.HEADERFLAG_QR)
message_render.set_header_flag(Message.HEADERFLAG_RD)
message_render.set_header_flag(Message.HEADERFLAG_AA)
@@ -161,7 +161,7 @@ class MessageTest(unittest.TestCase):
def test_set_rcode(self):
self.assertRaises(TypeError, self.r.set_rcode, "wrong")
- rcode = Rcode.BADVERS()
+ rcode = Rcode.BADVERS
self.r.set_rcode(rcode)
self.assertEqual(rcode, self.r.get_rcode())
@@ -173,7 +173,7 @@ class MessageTest(unittest.TestCase):
def test_set_opcode(self):
self.assertRaises(TypeError, self.r.set_opcode, "wrong")
- opcode = Opcode.IQUERY()
+ opcode = Opcode.IQUERY
self.r.set_opcode(opcode)
self.assertEqual(opcode, self.r.get_opcode())
@@ -304,8 +304,8 @@ class MessageTest(unittest.TestCase):
self.assertRaises(TypeError, self.r.clear, 3)
def test_clear_question_section(self):
- self.r.add_question(Question(Name("www.example.com"), RRClass.IN(),
- RRType.A()))
+ self.r.add_question(Question(Name("www.example.com"), RRClass.IN,
+ RRType.A))
self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION))
self.r.clear_section(Message.SECTION_QUESTION)
self.assertEqual(0, self.r.get_rr_count(Message.SECTION_QUESTION))
@@ -336,19 +336,19 @@ class MessageTest(unittest.TestCase):
renderer.get_data())
def test_to_wire_without_opcode(self):
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_rcode(Rcode.NOERROR)
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
def test_to_wire_without_rcode(self):
- self.r.set_opcode(Opcode.QUERY())
+ self.r.set_opcode(Opcode.QUERY)
self.assertRaises(InvalidMessageOperation, self.r.to_wire,
MessageRenderer())
def __common_tsigmessage_setup(self, flags=[Message.HEADERFLAG_RD],
rrtype=RRType("A"), answer_data=None):
- self.r.set_opcode(Opcode.QUERY())
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_opcode(Opcode.QUERY)
+ self.r.set_rcode(Rcode.NOERROR)
for flag in flags:
self.r.set_header_flag(flag)
if answer_data is not None:
@@ -407,8 +407,8 @@ class MessageTest(unittest.TestCase):
self.__common_tsig_checks("message_toWire4.wire")
def test_to_wire_tsig_truncation3(self):
- self.r.set_opcode(Opcode.QUERY())
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_opcode(Opcode.QUERY)
+ self.r.set_rcode(Rcode.NOERROR)
for i in range(1, 68):
self.r.add_question(Question(Name("www.example.com"),
RRClass("IN"), RRType(i)))
@@ -469,11 +469,11 @@ test.example.com. 3600 IN A 192.0.2.2
self.assertEqual(msg_str, str(message_render))
def test_to_text_without_opcode(self):
- self.r.set_rcode(Rcode.NOERROR())
+ self.r.set_rcode(Rcode.NOERROR)
self.assertRaises(InvalidMessageOperation, self.r.to_text)
def test_to_text_without_rcode(self):
- self.r.set_opcode(Opcode.QUERY())
+ self.r.set_opcode(Opcode.QUERY)
self.assertRaises(InvalidMessageOperation, self.r.to_text)
def test_from_wire(self):
@@ -488,8 +488,8 @@ test.example.com. 3600 IN A 192.0.2.2
message_parse = Message(0)
factoryFromFile(message_parse, "message_fromWire1")
self.assertEqual(0x1035, message_parse.get_qid())
- self.assertEqual(Opcode.QUERY(), message_parse.get_opcode())
- self.assertEqual(Rcode.NOERROR(), message_parse.get_rcode())
+ self.assertEqual(Opcode.QUERY, message_parse.get_opcode())
+ self.assertEqual(Rcode.NOERROR, message_parse.get_rcode())
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_QR))
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_RD))
self.assertTrue(message_parse.get_header_flag(Message.HEADERFLAG_AA))
@@ -568,7 +568,7 @@ test.example.com. 3600 IN A 192.0.2.2
# Extended Rcode = BADVERS
message_parse = Message(Message.PARSE)
factoryFromFile(message_parse, "message_fromWire10.wire")
- self.assertEqual(Rcode.BADVERS(), message_parse.get_rcode())
+ self.assertEqual(Rcode.BADVERS, message_parse.get_rcode())
# Maximum extended Rcode
message_parse.clear(Message.PARSE)
diff --git a/src/lib/dns/python/tests/messagerenderer_python_test.py b/src/lib/dns/python/tests/messagerenderer_python_test.py
index 5362496..8d5f26f 100644
--- a/src/lib/dns/python/tests/messagerenderer_python_test.py
+++ b/src/lib/dns/python/tests/messagerenderer_python_test.py
@@ -31,8 +31,8 @@ class MessageRendererTest(unittest.TestCase):
message = Message(Message.RENDER)
message.set_qid(123)
- message.set_opcode(Opcode.QUERY())
- message.set_rcode(Rcode.NOERROR())
+ message.set_opcode(Opcode.QUERY)
+ message.set_rcode(Rcode.NOERROR)
message.add_question(Question(name, c, t))
self.message1 = message
@@ -40,8 +40,8 @@ class MessageRendererTest(unittest.TestCase):
message.set_qid(123)
message.set_header_flag(Message.HEADERFLAG_AA, True)
message.set_header_flag(Message.HEADERFLAG_QR, True)
- message.set_opcode(Opcode.QUERY())
- message.set_rcode(Rcode.NOERROR())
+ message.set_opcode(Opcode.QUERY)
+ message.set_rcode(Rcode.NOERROR)
message.add_question(Question(name, c, t))
rrset = RRset(name, c, t, ttl)
rrset.add_rdata(Rdata(t, c, "192.0.2.98"))
diff --git a/src/lib/dns/python/tests/nsec3hash_python_test.py b/src/lib/dns/python/tests/nsec3hash_python_test.py
index 1a247d0..320529a 100644
--- a/src/lib/dns/python/tests/nsec3hash_python_test.py
+++ b/src/lib/dns/python/tests/nsec3hash_python_test.py
@@ -24,9 +24,9 @@ class NSEC3HashTest(unittest.TestCase):
def setUp(self):
self.nsec3_common = "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(), RRClass.IN(),
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM, RRClass.IN,
"1 0 12 aabbccdd"))
- self.test_hash_nsec3 = NSEC3Hash(Rdata(RRType.NSEC3(), RRClass.IN(),
+ self.test_hash_nsec3 = NSEC3Hash(Rdata(RRType.NSEC3, RRClass.IN,
"1 0 12 aabbccdd " +
self.nsec3_common))
def test_bad_construct(self):
@@ -37,20 +37,20 @@ class NSEC3HashTest(unittest.TestCase):
self.assertRaises(TypeError, NSEC3Hash, "1 0 12 aabbccdd")
# additional parameter
- self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(),
+ self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.NSEC3PARAM,
+ RRClass.IN,
"1 0 12 aabbccdd"), 1)
# Invaid type of RDATA
- self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.A(), RRClass.IN(),
+ self.assertRaises(TypeError, NSEC3Hash, Rdata(RRType.A, RRClass.IN,
"192.0.2.1"))
def test_unknown_algorithm(self):
self.assertRaises(UnknownNSEC3HashAlgorithm, NSEC3Hash,
- Rdata(RRType.NSEC3PARAM(), RRClass.IN(),
+ Rdata(RRType.NSEC3PARAM, RRClass.IN,
"2 0 12 aabbccdd"))
self.assertRaises(UnknownNSEC3HashAlgorithm, NSEC3Hash,
- Rdata(RRType.NSEC3(), RRClass.IN(),
+ Rdata(RRType.NSEC3, RRClass.IN,
"2 0 12 aabbccdd " + self.nsec3_common))
def calculate_check(self, hash):
@@ -71,15 +71,15 @@ class NSEC3HashTest(unittest.TestCase):
# Using unusually large iterations, something larger than the 8-bit
#range. (expected hash value generated by BIND 9's dnssec-signzone)
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(), "1 0 256 AABBCCDD"))
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM,
+ RRClass.IN, "1 0 256 AABBCCDD"))
self.assertEqual("COG6A52MJ96MNMV3QUCAGGCO0RHCC2Q3",
self.test_hash.calculate(Name("example.org")))
# Some boundary cases: 0-iteration and empty salt. Borrowed from the
# .com zone data.
- self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM(),
- RRClass.IN(),"1 0 0 -"))
+ self.test_hash = NSEC3Hash(Rdata(RRType.NSEC3PARAM,
+ RRClass.IN,"1 0 0 -"))
self.assertEqual("CK0POJMG874LJREF7EFN8430QVIT8BSM",
self.test_hash.calculate(Name("com")))
@@ -90,39 +90,39 @@ class NSEC3HashTest(unittest.TestCase):
def check_match(self, hash, rrtype, postfix):
# If all parameters match, it's considered to be matched.
- self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 aabbccdd" + postfix)))
# Algorithm doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"2 0 12 aabbccdd" + postfix)))
# Iterations doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 1 aabbccdd" + postfix)))
# Salt doesn't match
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 aabbccde" + postfix)))
# Salt doesn't match: the other has an empty salt
- self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertFalse(hash.match(Rdata(rrtype, RRClass.IN,
"1 0 12 -" + postfix)))
# Flag doesn't matter
- self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN(),
+ self.assertTrue(hash.match(Rdata(rrtype, RRClass.IN,
"1 1 12 aabbccdd" + postfix)))
def test_match(self):
- self.check_match(self.test_hash, RRType.NSEC3(),
+ self.check_match(self.test_hash, RRType.NSEC3,
" " + self.nsec3_common)
- self.check_match(self.test_hash_nsec3, RRType.NSEC3(),
+ self.check_match(self.test_hash_nsec3, RRType.NSEC3,
" " + self.nsec3_common)
- self.check_match(self.test_hash, RRType.NSEC3PARAM(), "")
- self.check_match(self.test_hash_nsec3, RRType.NSEC3PARAM(), "")
+ self.check_match(self.test_hash, RRType.NSEC3PARAM, "")
+ self.check_match(self.test_hash_nsec3, RRType.NSEC3PARAM, "")
# bad parameter checks
self.assertRaises(TypeError, self.test_hash.match, 1)
self.assertRaises(TypeError, self.test_hash.match,
- Rdata(RRType.NSEC3(), RRClass.IN(),
+ Rdata(RRType.NSEC3, RRClass.IN,
"1 0 12 aabbccdd " + self.nsec3_common), 1)
self.assertRaises(TypeError, self.test_hash.match,
- Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
+ Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/opcode_python_test.py b/src/lib/dns/python/tests/opcode_python_test.py
index 84f449f..d938aa6 100644
--- a/src/lib/dns/python/tests/opcode_python_test.py
+++ b/src/lib/dns/python/tests/opcode_python_test.py
@@ -34,53 +34,53 @@ class OpcodeTest(unittest.TestCase):
self.assertEqual(Opcode.UPDATE_CODE, Opcode(5).get_code())
self.assertEqual(Opcode.RESERVED15_CODE, Opcode(15).get_code())
- self.assertEqual(Opcode.QUERY_CODE, Opcode.QUERY().get_code())
- self.assertEqual(Opcode.IQUERY_CODE, Opcode.IQUERY().get_code())
- self.assertEqual(Opcode.NOTIFY_CODE, Opcode.NOTIFY().get_code())
- self.assertEqual(Opcode.UPDATE_CODE, Opcode.UPDATE().get_code())
- self.assertEqual(Opcode.RESERVED15_CODE, Opcode.RESERVED15().get_code())
+ self.assertEqual(Opcode.QUERY_CODE, Opcode.QUERY.get_code())
+ self.assertEqual(Opcode.IQUERY_CODE, Opcode.IQUERY.get_code())
+ self.assertEqual(Opcode.NOTIFY_CODE, Opcode.NOTIFY.get_code())
+ self.assertEqual(Opcode.UPDATE_CODE, Opcode.UPDATE.get_code())
+ self.assertEqual(Opcode.RESERVED15_CODE, Opcode.RESERVED15.get_code())
def test_get_code(self):
- self.assertEqual(0, Opcode.QUERY().get_code())
- self.assertEqual(1, Opcode.IQUERY().get_code())
- self.assertEqual(2, Opcode.STATUS().get_code())
- self.assertEqual(3, Opcode.RESERVED3().get_code())
- self.assertEqual(4, Opcode.NOTIFY().get_code())
- self.assertEqual(5, Opcode.UPDATE().get_code())
- self.assertEqual(6, Opcode.RESERVED6().get_code())
- self.assertEqual(7, Opcode.RESERVED7().get_code())
- self.assertEqual(8, Opcode.RESERVED8().get_code())
- self.assertEqual(9, Opcode.RESERVED9().get_code())
- self.assertEqual(10, Opcode.RESERVED10().get_code())
- self.assertEqual(11, Opcode.RESERVED11().get_code())
- self.assertEqual(12, Opcode.RESERVED12().get_code())
- self.assertEqual(13, Opcode.RESERVED13().get_code())
- self.assertEqual(14, Opcode.RESERVED14().get_code())
- self.assertEqual(15, Opcode.RESERVED15().get_code())
+ self.assertEqual(0, Opcode.QUERY.get_code())
+ self.assertEqual(1, Opcode.IQUERY.get_code())
+ self.assertEqual(2, Opcode.STATUS.get_code())
+ self.assertEqual(3, Opcode.RESERVED3.get_code())
+ self.assertEqual(4, Opcode.NOTIFY.get_code())
+ self.assertEqual(5, Opcode.UPDATE.get_code())
+ self.assertEqual(6, Opcode.RESERVED6.get_code())
+ self.assertEqual(7, Opcode.RESERVED7.get_code())
+ self.assertEqual(8, Opcode.RESERVED8.get_code())
+ self.assertEqual(9, Opcode.RESERVED9.get_code())
+ self.assertEqual(10, Opcode.RESERVED10.get_code())
+ self.assertEqual(11, Opcode.RESERVED11.get_code())
+ self.assertEqual(12, Opcode.RESERVED12.get_code())
+ self.assertEqual(13, Opcode.RESERVED13.get_code())
+ self.assertEqual(14, Opcode.RESERVED14.get_code())
+ self.assertEqual(15, Opcode.RESERVED15.get_code())
def test_to_text(self):
- self.assertEqual("QUERY", Opcode.QUERY().to_text())
- self.assertEqual("QUERY", str(Opcode.QUERY()))
- self.assertEqual("IQUERY", Opcode.IQUERY().to_text())
- self.assertEqual("STATUS", Opcode.STATUS().to_text())
- self.assertEqual("RESERVED3", Opcode.RESERVED3().to_text())
- self.assertEqual("NOTIFY", Opcode.NOTIFY().to_text())
- self.assertEqual("UPDATE", Opcode.UPDATE().to_text())
- self.assertEqual("RESERVED6", Opcode.RESERVED6().to_text())
- self.assertEqual("RESERVED7", Opcode.RESERVED7().to_text())
- self.assertEqual("RESERVED8", Opcode.RESERVED8().to_text())
- self.assertEqual("RESERVED9", Opcode.RESERVED9().to_text())
- self.assertEqual("RESERVED10", Opcode.RESERVED10().to_text())
- self.assertEqual("RESERVED11", Opcode.RESERVED11().to_text())
- self.assertEqual("RESERVED12", Opcode.RESERVED12().to_text())
- self.assertEqual("RESERVED13", Opcode.RESERVED13().to_text())
- self.assertEqual("RESERVED14", Opcode.RESERVED14().to_text())
- self.assertEqual("RESERVED15", Opcode.RESERVED15().to_text())
+ self.assertEqual("QUERY", Opcode.QUERY.to_text())
+ self.assertEqual("QUERY", str(Opcode.QUERY))
+ self.assertEqual("IQUERY", Opcode.IQUERY.to_text())
+ self.assertEqual("STATUS", Opcode.STATUS.to_text())
+ self.assertEqual("RESERVED3", Opcode.RESERVED3.to_text())
+ self.assertEqual("NOTIFY", Opcode.NOTIFY.to_text())
+ self.assertEqual("UPDATE", Opcode.UPDATE.to_text())
+ self.assertEqual("RESERVED6", Opcode.RESERVED6.to_text())
+ self.assertEqual("RESERVED7", Opcode.RESERVED7.to_text())
+ self.assertEqual("RESERVED8", Opcode.RESERVED8.to_text())
+ self.assertEqual("RESERVED9", Opcode.RESERVED9.to_text())
+ self.assertEqual("RESERVED10", Opcode.RESERVED10.to_text())
+ self.assertEqual("RESERVED11", Opcode.RESERVED11.to_text())
+ self.assertEqual("RESERVED12", Opcode.RESERVED12.to_text())
+ self.assertEqual("RESERVED13", Opcode.RESERVED13.to_text())
+ self.assertEqual("RESERVED14", Opcode.RESERVED14.to_text())
+ self.assertEqual("RESERVED15", Opcode.RESERVED15.to_text())
def test_richcmp(self):
- o1 = Opcode.QUERY()
- o2 = Opcode.NOTIFY()
- o3 = Opcode.NOTIFY()
+ o1 = Opcode.QUERY
+ o2 = Opcode.NOTIFY
+ o3 = Opcode.NOTIFY
self.assertTrue(o2 == o3)
self.assertFalse(o2 != o3)
self.assertTrue(o1 != o2)
diff --git a/src/lib/dns/python/tests/rcode_python_test.py b/src/lib/dns/python/tests/rcode_python_test.py
index 77fed3a..c4a8067 100644
--- a/src/lib/dns/python/tests/rcode_python_test.py
+++ b/src/lib/dns/python/tests/rcode_python_test.py
@@ -54,36 +54,36 @@ class RcodeTest(unittest.TestCase):
self.assertEqual(Rcode.RESERVED15_CODE, Rcode(15).get_code())
self.assertEqual(Rcode.BADVERS_CODE, Rcode(16).get_code())
- self.assertEqual(Rcode.NOERROR_CODE, Rcode.NOERROR().get_code())
- self.assertEqual(Rcode.FORMERR_CODE, Rcode.FORMERR().get_code())
- self.assertEqual(Rcode.NOTIMP_CODE, Rcode.NOTIMP().get_code())
- self.assertEqual(Rcode.REFUSED_CODE, Rcode.REFUSED().get_code())
- self.assertEqual(Rcode.RESERVED15_CODE, Rcode.RESERVED15().get_code())
- self.assertEqual(Rcode.BADVERS_CODE, Rcode.BADVERS().get_code())
+ self.assertEqual(Rcode.NOERROR_CODE, Rcode.NOERROR.get_code())
+ self.assertEqual(Rcode.FORMERR_CODE, Rcode.FORMERR.get_code())
+ self.assertEqual(Rcode.NOTIMP_CODE, Rcode.NOTIMP.get_code())
+ self.assertEqual(Rcode.REFUSED_CODE, Rcode.REFUSED.get_code())
+ self.assertEqual(Rcode.RESERVED15_CODE, Rcode.RESERVED15.get_code())
+ self.assertEqual(Rcode.BADVERS_CODE, Rcode.BADVERS.get_code())
def test_get_code(self):
- self.assertEqual(0, Rcode.NOERROR().get_code())
- self.assertEqual(1, Rcode.FORMERR().get_code())
- self.assertEqual(2, Rcode.SERVFAIL().get_code())
- self.assertEqual(3, Rcode.NXDOMAIN().get_code())
- self.assertEqual(4, Rcode.NOTIMP().get_code())
- self.assertEqual(5, Rcode.REFUSED().get_code())
- self.assertEqual(6, Rcode.YXDOMAIN().get_code())
- self.assertEqual(7, Rcode.YXRRSET().get_code())
- self.assertEqual(8, Rcode.NXRRSET().get_code())
- self.assertEqual(9, Rcode.NOTAUTH().get_code())
- self.assertEqual(10, Rcode.NOTZONE().get_code())
- self.assertEqual(11, Rcode.RESERVED11().get_code())
- self.assertEqual(12, Rcode.RESERVED12().get_code())
- self.assertEqual(13, Rcode.RESERVED13().get_code())
- self.assertEqual(14, Rcode.RESERVED14().get_code())
- self.assertEqual(15, Rcode.RESERVED15().get_code())
- self.assertEqual(16, Rcode.BADVERS().get_code())
+ self.assertEqual(0, Rcode.NOERROR.get_code())
+ self.assertEqual(1, Rcode.FORMERR.get_code())
+ self.assertEqual(2, Rcode.SERVFAIL.get_code())
+ self.assertEqual(3, Rcode.NXDOMAIN.get_code())
+ self.assertEqual(4, Rcode.NOTIMP.get_code())
+ self.assertEqual(5, Rcode.REFUSED.get_code())
+ self.assertEqual(6, Rcode.YXDOMAIN.get_code())
+ self.assertEqual(7, Rcode.YXRRSET.get_code())
+ self.assertEqual(8, Rcode.NXRRSET.get_code())
+ self.assertEqual(9, Rcode.NOTAUTH.get_code())
+ self.assertEqual(10, Rcode.NOTZONE.get_code())
+ self.assertEqual(11, Rcode.RESERVED11.get_code())
+ self.assertEqual(12, Rcode.RESERVED12.get_code())
+ self.assertEqual(13, Rcode.RESERVED13.get_code())
+ self.assertEqual(14, Rcode.RESERVED14.get_code())
+ self.assertEqual(15, Rcode.RESERVED15.get_code())
+ self.assertEqual(16, Rcode.BADVERS.get_code())
def test_get_extended_code(self):
- self.assertEqual(0, Rcode.NOERROR().get_extended_code())
- self.assertEqual(0, Rcode.YXRRSET().get_extended_code())
- self.assertEqual(1, Rcode.BADVERS().get_extended_code())
+ self.assertEqual(0, Rcode.NOERROR.get_extended_code())
+ self.assertEqual(0, Rcode.YXRRSET.get_extended_code())
+ self.assertEqual(1, Rcode.BADVERS.get_extended_code())
self.assertEqual(0xab, Rcode(0xabf).get_extended_code())
self.assertEqual(0xff, Rcode(0xfff).get_extended_code())
@@ -107,13 +107,13 @@ class RcodeTest(unittest.TestCase):
self.assertEqual("RESERVED15", Rcode(15).to_text())
self.assertEqual("BADVERS", Rcode(16).to_text())
- self.assertEqual("17", Rcode(Rcode.BADVERS().get_code() + 1).to_text())
+ self.assertEqual("17", Rcode(Rcode.BADVERS.get_code() + 1).to_text())
self.assertEqual("4095", Rcode(0xfff).to_text())
def test_richcmp(self):
- r1 = Rcode.NOERROR()
- r2 = Rcode.FORMERR()
- r3 = Rcode.FORMERR()
+ r1 = Rcode.NOERROR
+ r2 = Rcode.FORMERR
+ r3 = Rcode.FORMERR
self.assertTrue(r2 == r3)
self.assertTrue(r1 != r2)
self.assertFalse(r1 == r2)
diff --git a/src/lib/dns/python/tests/rrclass_python_test.py b/src/lib/dns/python/tests/rrclass_python_test.py
index a048c4c..880e331 100644
--- a/src/lib/dns/python/tests/rrclass_python_test.py
+++ b/src/lib/dns/python/tests/rrclass_python_test.py
@@ -23,8 +23,8 @@ from pydnspp import *
class RRClassTest(unittest.TestCase):
def setUp(self):
- self.c1 = RRClass.IN()
- self.c2 = RRClass.CH()
+ self.c1 = RRClass.IN
+ self.c2 = RRClass.CH
def test_init(self):
self.assertRaises(InvalidRRClass, RRClass, "wrong")
@@ -81,17 +81,17 @@ class RRClassTest(unittest.TestCase):
def test_hash(self):
# Exploiting the knowledge that the hash value is the numeric class
# value, we can predict the comparison result.
- self.assertEqual(hash(RRClass.IN()), hash(RRClass("IN")))
+ self.assertEqual(hash(RRClass.IN), hash(RRClass("IN")))
self.assertEqual(hash(RRClass("in")), hash(RRClass("IN")))
- self.assertNotEqual(hash(RRClass.IN()), hash(RRClass.CH()))
- self.assertNotEqual(hash(RRClass.IN()), hash(RRClass("CLASS65535")))
+ self.assertNotEqual(hash(RRClass.IN), hash(RRClass.CH))
+ self.assertNotEqual(hash(RRClass.IN), hash(RRClass("CLASS65535")))
def test_statics(self):
- self.assertEqual(RRClass.IN(), RRClass("IN"))
- self.assertEqual(RRClass.CH(), RRClass("CH"))
- self.assertEqual(RRClass.HS(), RRClass("HS"))
- self.assertEqual(254, RRClass.NONE().get_code())
- self.assertEqual(255, RRClass.ANY().get_code())
+ self.assertEqual(RRClass.IN, RRClass("IN"))
+ self.assertEqual(RRClass.CH, RRClass("CH"))
+ self.assertEqual(RRClass.HS, RRClass("HS"))
+ self.assertEqual(254, RRClass.NONE.get_code())
+ self.assertEqual(255, RRClass.ANY.get_code())
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrset_collection_python_test.py b/src/lib/dns/python/tests/rrset_collection_python_test.py
index 2cf286e..1bbbc80 100644
--- a/src/lib/dns/python/tests/rrset_collection_python_test.py
+++ b/src/lib/dns/python/tests/rrset_collection_python_test.py
@@ -34,64 +34,64 @@ class RRsetCollectionTest(unittest.TestCase):
self.assertRaises(TypeError, RRsetCollection, 1)
self.assertRaises(TypeError, RRsetCollection, # extra arg
b'example. 0 A 192.0.2.1',
- Name('example'), RRClass.IN(), 1)
+ Name('example'), RRClass.IN, 1)
self.assertRaises(TypeError, RRsetCollection, # incorrect order
- b'example. 0 A 192.0.2.1', RRClass.IN(),
+ b'example. 0 A 192.0.2.1', RRClass.IN,
Name('example'))
# constructor will result in C++ exception.
self.assertRaises(IscException, RRsetCollection,
TESTDATA_DIR + '/no_such_file', Name('example.org'),
- RRClass.IN())
+ RRClass.IN)
def check_find_result(self, rrsets):
# Commonly used check pattern
- found = rrsets.find(Name('www.example.org'), RRClass.IN(), RRType.A())
+ found = rrsets.find(Name('www.example.org'), RRClass.IN, RRType.A)
self.assertNotEqual(None, found)
self.assertEqual(Name('www.example.org'), found.get_name())
- self.assertEqual(RRClass.IN(), found.get_class())
- self.assertEqual(RRType.A(), found.get_type())
+ self.assertEqual(RRClass.IN, found.get_class())
+ self.assertEqual(RRType.A, found.get_type())
self.assertEqual('192.0.2.1', found.get_rdata()[0].to_text())
def test_find(self):
# Checking the underlying find() is called as intended, both for
# success and failure cases, and with two different constructors.
rrsets = RRsetCollection(TESTDATA_DIR + '/example.org',
- Name('example.org'), RRClass.IN())
+ Name('example.org'), RRClass.IN)
self.check_find_result(rrsets)
- self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN(),
- RRType.A()))
+ self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN,
+ RRType.A))
rrsets = RRsetCollection(b'www.example.org. 3600 IN A 192.0.2.1',
- Name('example.org'), RRClass.IN())
+ Name('example.org'), RRClass.IN)
self.check_find_result(rrsets)
- self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN(),
- RRType.A()))
+ self.assertEqual(None, rrsets.find(Name('example.org'), RRClass.IN,
+ RRType.A))
def test_find_badargs(self):
rrsets = RRsetCollection()
# Check bad arguments: bad types
- self.assertRaises(TypeError, rrsets.find, 1, RRClass.IN(), RRType.A())
+ self.assertRaises(TypeError, rrsets.find, 1, RRClass.IN, RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'), 1,
- RRType.A())
+ RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'), 1,
- RRType.A())
+ RRType.A)
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN(), 1)
- self.assertRaises(TypeError, rrsets.find, Name('example'), RRType.A(),
- RRClass.IN())
+ RRClass.IN, 1)
+ self.assertRaises(TypeError, rrsets.find, Name('example'), RRType.A,
+ RRClass.IN)
# Check bad arguments: too many/few arguments
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN(), RRType.A(), 0)
+ RRClass.IN, RRType.A, 0)
self.assertRaises(TypeError, rrsets.find, Name('example'),
- RRClass.IN())
+ RRClass.IN)
def test_add_remove_rrset(self):
name = Name('www.example.org')
- rrclass = RRClass.IN()
- rrtype = RRType.A()
+ rrclass = RRClass.IN
+ rrtype = RRType.A
# Create a collection with no RRsets
rrsets = RRsetCollection()
@@ -134,7 +134,7 @@ class RRsetCollectionTest(unittest.TestCase):
pass
rrsets = EmptyRRsetCollection()
self.assertRaises(TypeError, rrsets.find, Name('www.example.org'),
- RRClass.IN(), RRType.A())
+ RRClass.IN, RRType.A)
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrset_python_test.py b/src/lib/dns/python/tests/rrset_python_test.py
index 0544872..010b60c 100644
--- a/src/lib/dns/python/tests/rrset_python_test.py
+++ b/src/lib/dns/python/tests/rrset_python_test.py
@@ -23,7 +23,7 @@ import os
from pydnspp import *
class TestModuleSpec(unittest.TestCase):
-
+
def setUp(self):
self.test_name = Name("test.example.com")
self.test_domain = Name("example.com")
@@ -78,8 +78,8 @@ class TestModuleSpec(unittest.TestCase):
def test_add_rdata(self):
# no iterator to read out yet (TODO: add addition test once implemented)
- self.assertRaises(TypeError, self.rrset_a.add_rdata, Rdata(RRType("NS"), RRClass("IN"), "test.name"))
- pass
+ self.assertRaises(TypeError, self.rrset_a.add_rdata,
+ Rdata(RRType("NS"), RRClass("IN"), "test.name."))
def test_to_text(self):
self.assertEqual("test.example.com. 3600 IN A 192.0.2.1\n"
@@ -126,6 +126,6 @@ class TestModuleSpec(unittest.TestCase):
# they would leak.
self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()))
self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()[0]))
-
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/rrtype_python_test.py b/src/lib/dns/python/tests/rrtype_python_test.py
index 4548b50..7d20136 100644
--- a/src/lib/dns/python/tests/rrtype_python_test.py
+++ b/src/lib/dns/python/tests/rrtype_python_test.py
@@ -119,35 +119,35 @@ class TestModuleSpec(unittest.TestCase):
def test_hash(self):
# Exploiting the knowledge that the hash value is the numeric class
# value, we can predict the comparison result.
- self.assertEqual(hash(RRType.AAAA()), hash(RRType("AAAA")))
+ self.assertEqual(hash(RRType.AAAA), hash(RRType("AAAA")))
self.assertEqual(hash(RRType("aaaa")), hash(RRType("AAAA")))
self.assertEqual(hash(RRType(28)), hash(RRType("AAAA")))
- self.assertNotEqual(hash(RRType.A()), hash(RRType.NS()))
- self.assertNotEqual(hash(RRType.AAAA()), hash(RRType("Type65535")))
+ self.assertNotEqual(hash(RRType.A), hash(RRType.NS))
+ self.assertNotEqual(hash(RRType.AAAA), hash(RRType("Type65535")))
def test_statics(self):
- self.assertEqual(RRType("NSEC3PARAM"), RRType.NSEC3PARAM())
- self.assertEqual(RRType("DNAME"), RRType.DNAME())
- self.assertEqual(RRType("PTR"), RRType.PTR())
- self.assertEqual(RRType("MX"), RRType.MX())
- self.assertEqual(RRType("DNSKEY"), RRType.DNSKEY())
- self.assertEqual(RRType("TXT"), RRType.TXT())
- self.assertEqual(RRType("RRSIG"), RRType.RRSIG())
- self.assertEqual(RRType("NSEC"), RRType.NSEC())
- self.assertEqual(RRType("AAAA"), RRType.AAAA())
- self.assertEqual(RRType("DS"), RRType.DS())
- self.assertEqual(RRType("OPT"), RRType.OPT())
- self.assertEqual(RRType("A"), RRType.A())
- self.assertEqual(RRType("NS"), RRType.NS())
- self.assertEqual(RRType("CNAME"), RRType.CNAME())
- self.assertEqual(RRType("SOA"), RRType.SOA())
- self.assertEqual(RRType("NSEC3"), RRType.NSEC3())
+ self.assertEqual(RRType("NSEC3PARAM"), RRType.NSEC3PARAM)
+ self.assertEqual(RRType("DNAME"), RRType.DNAME)
+ self.assertEqual(RRType("PTR"), RRType.PTR)
+ self.assertEqual(RRType("MX"), RRType.MX)
+ self.assertEqual(RRType("DNSKEY"), RRType.DNSKEY)
+ self.assertEqual(RRType("TXT"), RRType.TXT)
+ self.assertEqual(RRType("RRSIG"), RRType.RRSIG)
+ self.assertEqual(RRType("NSEC"), RRType.NSEC)
+ self.assertEqual(RRType("AAAA"), RRType.AAAA)
+ self.assertEqual(RRType("DS"), RRType.DS)
+ self.assertEqual(RRType("OPT"), RRType.OPT)
+ self.assertEqual(RRType("A"), RRType.A)
+ self.assertEqual(RRType("NS"), RRType.NS)
+ self.assertEqual(RRType("CNAME"), RRType.CNAME)
+ self.assertEqual(RRType("SOA"), RRType.SOA)
+ self.assertEqual(RRType("NSEC3"), RRType.NSEC3)
# these can't be built with string input
# (see the original cpp TODO)
- self.assertEqual(251, RRType.IXFR().get_code())
- self.assertEqual(252, RRType.AXFR().get_code())
- self.assertEqual(255, RRType.ANY().get_code())
+ self.assertEqual(251, RRType.IXFR.get_code())
+ self.assertEqual(252, RRType.AXFR.get_code())
+ self.assertEqual(255, RRType.ANY.get_code())
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/python/tests/tsig_python_test.py b/src/lib/dns/python/tests/tsig_python_test.py
index 4d99175..282431c 100644
--- a/src/lib/dns/python/tests/tsig_python_test.py
+++ b/src/lib/dns/python/tests/tsig_python_test.py
@@ -40,7 +40,7 @@ class TSIGContextTest(unittest.TestCase):
self.keyring = TSIGKeyRing()
self.message = Message(Message.RENDER)
self.renderer = MessageRenderer()
- self.test_class = RRClass.IN()
+ self.test_class = RRClass.IN
self.test_ttl = RRTTL(86400)
self.secret = base64.b64decode(b"SFuWd/q99SzF8Yzd1QbB9g==")
self.tsig_ctx = TSIGContext(TSIGKey(self.test_name,
@@ -59,12 +59,12 @@ class TSIGContextTest(unittest.TestCase):
# Note: intentionally use camelCase so that we can easily copy-paste
# corresponding C++ tests.
def createMessageAndSign(self, id, qname, ctx, message_flags=RD_FLAG,
- qtype=RRType.A(), answer_data=None,
+ qtype=RRType.A, answer_data=None,
answer_type=None, add_question=True,
- rcode=Rcode.NOERROR()):
+ rcode=Rcode.NOERROR):
self.message.clear(Message.RENDER)
self.message.set_qid(id)
- self.message.set_opcode(Opcode.QUERY())
+ self.message.set_opcode(Opcode.QUERY)
self.message.set_rcode(rcode)
if (message_flags & QR_FLAG) != 0:
self.message.set_header_flag(Message.HEADERFLAG_QR)
@@ -120,7 +120,7 @@ class TSIGContextTest(unittest.TestCase):
self.assertEqual(TSIGContext.STATE_INIT, self.tsig_ctx.get_state())
# And there should be no error code.
- self.assertEqual(TSIGError(Rcode.NOERROR()), self.tsig_ctx.get_error())
+ self.assertEqual(TSIGError(Rcode.NOERROR), self.tsig_ctx.get_error())
# No message signed yet
self.assertRaises(TSIGContextError, self.tsig_ctx.last_had_signature)
@@ -249,7 +249,7 @@ class TSIGContextTest(unittest.TestCase):
tsig = self.createMessageAndSign(self.qid, self.test_name,
self.tsig_verify_ctx,
QR_FLAG|AA_FLAG|RD_FLAG,
- RRType.A(), "192.0.2.1")
+ RRType.A, "192.0.2.1")
expected_mac = b"\x8f\xcd\xa6\x6a\x7c\xd1\xa3\xb9\x94\x8e\xb1\x86" + \
b"\x9d\x38\x4a\x9f"
@@ -280,7 +280,7 @@ class TSIGContextTest(unittest.TestCase):
zone_name = Name("example.com")
tsig = self.createMessageAndSign(axfr_qid, zone_name, self.tsig_ctx,
- 0, RRType.AXFR())
+ 0, RRType.AXFR)
received_data = read_wire_data("tsig_verify1.wire")
self.commonVerifyChecks(self.tsig_verify_ctx, tsig, received_data,
@@ -289,10 +289,10 @@ class TSIGContextTest(unittest.TestCase):
tsig = self.createMessageAndSign(axfr_qid, zone_name,
self.tsig_verify_ctx,
- AA_FLAG|QR_FLAG, RRType.AXFR(),
+ AA_FLAG|QR_FLAG, RRType.AXFR,
"ns.example.com. root.example.com." +\
" 2011041503 7200 3600 2592000 1200",
- RRType.SOA())
+ RRType.SOA)
received_data = read_wire_data("tsig_verify2.wire")
self.commonVerifyChecks(self.tsig_ctx, tsig, received_data,
@@ -302,8 +302,8 @@ class TSIGContextTest(unittest.TestCase):
b"\x60\x34\x13\x09\x68"
tsig = self.createMessageAndSign(axfr_qid, zone_name,
self.tsig_verify_ctx,
- AA_FLAG|QR_FLAG, RRType.AXFR(),
- "ns.example.com.", RRType.NS(),
+ AA_FLAG|QR_FLAG, RRType.AXFR,
+ "ns.example.com.", RRType.NS,
False)
self.commonSignChecks(tsig, axfr_qid, 0x4da8e951, expected_mac)
@@ -316,7 +316,7 @@ class TSIGContextTest(unittest.TestCase):
test_qid = 0x7fc4
tsig = self.createMessageAndSign(test_qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# "advance the clock" and try validating, which should fail due to
# BADTIME
@@ -328,8 +328,8 @@ class TSIGContextTest(unittest.TestCase):
# make and sign a response in the context of TSIG error.
tsig = self.createMessageAndSign(test_qid, self.test_name,
self.tsig_verify_ctx,
- QR_FLAG, RRType.SOA(), None, None,
- True, Rcode.NOTAUTH())
+ QR_FLAG, RRType.SOA, None, None,
+ True, Rcode.NOTAUTH)
expected_otherdata = b"\x00\x00\x4d\xa8\xbe\x86"
expected_mac = b"\xd4\xb0\x43\xf6\xf4\x44\x95\xec\x8a\x01\x26" +\
@@ -344,7 +344,7 @@ class TSIGContextTest(unittest.TestCase):
fix_current_time(0x4da8b9d6)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# "rewind the clock" and try validating, which should fail due to
# BADTIME
@@ -361,7 +361,7 @@ class TSIGContextTest(unittest.TestCase):
fix_current_time(0x4da8b9d6)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
fix_current_time(0x4da8b9d6 + 301)
self.assertEqual(TSIGError.BAD_TIME,
@@ -382,7 +382,7 @@ class TSIGContextTest(unittest.TestCase):
def test_badtime_overflow(self):
fix_current_time(200)
tsig = self.createMessageAndSign(self.qid, self.test_name,
- self.tsig_ctx, 0, RRType.SOA())
+ self.tsig_ctx, 0, RRType.SOA)
# This should be in the okay range, but since "200 - fudge" overflows
# and we compare them as 64-bit unsigned integers, it results in a
@@ -522,7 +522,7 @@ class TSIGContextTest(unittest.TestCase):
self.tsig_verify_ctx.get_state())
self.createMessageAndSign(self.qid, self.test_name,
self.tsig_verify_ctx,
- QR_FLAG|AA_FLAG|RD_FLAG, RRType.A(),
+ QR_FLAG|AA_FLAG|RD_FLAG, RRType.A,
"192.0.2.1")
self.assertEqual(TSIGContext.STATE_SENT_RESPONSE,
self.tsig_verify_ctx.get_state())
diff --git a/src/lib/dns/python/tests/tsigerror_python_test.py b/src/lib/dns/python/tests/tsigerror_python_test.py
index a968b6b..01860d3 100644
--- a/src/lib/dns/python/tests/tsigerror_python_test.py
+++ b/src/lib/dns/python/tests/tsigerror_python_test.py
@@ -28,7 +28,7 @@ class TSIGErrorTest(unittest.TestCase):
def test_from_rcode(self):
# We use RCODE for code values from 0-15.
- self.assertEqual(0, TSIGError(Rcode.NOERROR()).get_code())
+ self.assertEqual(0, TSIGError(Rcode.NOERROR).get_code())
self.assertEqual(15, TSIGError(Rcode(15)).get_code())
# From error code 16 TSIG errors define a separate space, so passing
@@ -50,19 +50,19 @@ class TSIGErrorTest(unittest.TestCase):
self.assertEqual(TSIGError.BAD_TIME_CODE, TSIGError.BAD_TIME.get_code())
def test_equal(self):
- self.assertTrue(TSIGError.NOERROR == TSIGError(Rcode.NOERROR()))
- self.assertTrue(TSIGError(Rcode.NOERROR()) == TSIGError.NOERROR)
+ self.assertTrue(TSIGError.NOERROR == TSIGError(Rcode.NOERROR))
+ self.assertTrue(TSIGError(Rcode.NOERROR) == TSIGError.NOERROR)
self.assertTrue(TSIGError.BAD_SIG == TSIGError(16))
self.assertTrue(TSIGError(16) == TSIGError.BAD_SIG)
def test_nequal(self):
- self.assertTrue(TSIGError.BAD_KEY != TSIGError(Rcode.NOERROR()))
- self.assertTrue(TSIGError(Rcode.NOERROR()) != TSIGError.BAD_KEY)
+ self.assertTrue(TSIGError.BAD_KEY != TSIGError(Rcode.NOERROR))
+ self.assertTrue(TSIGError(Rcode.NOERROR) != TSIGError.BAD_KEY)
def test_to_text(self):
# TSIGError derived from the standard Rcode
- self.assertEqual("NOERROR", TSIGError(Rcode.NOERROR()).to_text())
+ self.assertEqual("NOERROR", TSIGError(Rcode.NOERROR).to_text())
# Well known TSIG errors
self.assertEqual("BADSIG", TSIGError.BAD_SIG.to_text())
@@ -74,21 +74,21 @@ class TSIGErrorTest(unittest.TestCase):
self.assertEqual("65535", TSIGError(65535).to_text());
# also check str() works same way
- self.assertEqual("NOERROR", str(TSIGError(Rcode.NOERROR())))
+ self.assertEqual("NOERROR", str(TSIGError(Rcode.NOERROR)))
self.assertEqual("BADSIG", str(TSIGError.BAD_SIG))
def test_to_rcode(self):
# TSIGError derived from the standard Rcode
- self.assertEqual(Rcode.NOERROR(), TSIGError(Rcode.NOERROR()).to_rcode())
+ self.assertEqual(Rcode.NOERROR, TSIGError(Rcode.NOERROR).to_rcode())
# Well known TSIG errors
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_SIG.to_rcode())
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_KEY.to_rcode())
- self.assertEqual(Rcode.NOTAUTH(), TSIGError.BAD_TIME.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_SIG.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_KEY.to_rcode())
+ self.assertEqual(Rcode.NOTAUTH, TSIGError.BAD_TIME.to_rcode())
# Unknown (or not yet supported) codes are treated as SERVFAIL.
- self.assertEqual(Rcode.SERVFAIL(), TSIGError(19).to_rcode())
- self.assertEqual(Rcode.SERVFAIL(), TSIGError(65535).to_rcode())
+ self.assertEqual(Rcode.SERVFAIL, TSIGError(19).to_rcode())
+ self.assertEqual(Rcode.SERVFAIL, TSIGError(65535).to_rcode())
# Check there's no redundant refcount (which would cause leak)
self.assertEqual(1, sys.getrefcount(TSIGError.BAD_SIG.to_rcode()))
diff --git a/src/lib/dns/python/tests/zone_checker_python_test.py b/src/lib/dns/python/tests/zone_checker_python_test.py
index 66b6c47..dc7d258 100644
--- a/src/lib/dns/python/tests/zone_checker_python_test.py
+++ b/src/lib/dns/python/tests/zone_checker_python_test.py
@@ -35,8 +35,8 @@ class ZoneCheckerTest(unittest.TestCase):
rrsets = RRsetCollection(b'example.org. 0 SOA . . 0 0 0 0 0\n' +
b'example.org. 0 NS ns.example.org.\n' +
b'ns.example.org. 0 A 192.0.2.1\n',
- Name('example.org'), RRClass.IN())
- self.assertTrue(check_zone(Name('example.org'), RRClass.IN(),
+ Name('example.org'), RRClass.IN)
+ self.assertTrue(check_zone(Name('example.org'), RRClass.IN,
rrsets,
(lambda r: self.__callback(r, errors),
lambda r: self.__callback(r, warns))))
@@ -45,8 +45,8 @@ class ZoneCheckerTest(unittest.TestCase):
# Check fails and one additional warning.
rrsets = RRsetCollection(b'example.org. 0 NS ns.example.org.',
- Name('example.org'), RRClass.IN())
- self.assertFalse(check_zone(Name('example.org'), RRClass.IN(), rrsets,
+ Name('example.org'), RRClass.IN)
+ self.assertFalse(check_zone(Name('example.org'), RRClass.IN, rrsets,
(lambda r: self.__callback(r, errors),
lambda r: self.__callback(r, warns))))
self.assertEqual(['zone example.org/IN: has 0 SOA records'], errors)
@@ -56,7 +56,7 @@ class ZoneCheckerTest(unittest.TestCase):
# Same RRset collection, suppressing callbacks
errors = []
warns = []
- self.assertFalse(check_zone(Name('example.org'), RRClass.IN(), rrsets,
+ self.assertFalse(check_zone(Name('example.org'), RRClass.IN, rrsets,
(None, None)))
self.assertEqual([], errors)
self.assertEqual([], warns)
@@ -64,29 +64,29 @@ class ZoneCheckerTest(unittest.TestCase):
def test_check_badarg(self):
rrsets = RRsetCollection()
# Bad types
- self.assertRaises(TypeError, check_zone, 1, RRClass.IN(), rrsets,
+ self.assertRaises(TypeError, check_zone, 1, RRClass.IN, rrsets,
(None, None))
self.assertRaises(TypeError, check_zone, Name('example'), 1, rrsets,
(None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
1, (None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, 1)
# Bad callbacks
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (1, None))
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, 1))
# Extra/missing args
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None), 1)
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets)
- check_zone(Name('example'), RRClass.IN(), rrsets, (None, None))
+ check_zone(Name('example'), RRClass.IN, rrsets, (None, None))
def test_check_callback_fail(self):
# Let the call raise a Python exception. It should be propagated to
@@ -96,7 +96,7 @@ class ZoneCheckerTest(unittest.TestCase):
# Using an empty collection, triggering an error callback.
self.assertRaises(FakeException, check_zone, Name('example.org'),
- RRClass.IN(), RRsetCollection(),
+ RRClass.IN, RRsetCollection(),
(__bad_callback, None))
# An unusual case: the callback is expected to return None, but if it
@@ -108,7 +108,7 @@ class ZoneCheckerTest(unittest.TestCase):
ref_checker = RefChecker()
orig_refcnt = sys.getrefcount(ref_checker)
- check_zone(Name('example.org'), RRClass.IN(), RRsetCollection(),
+ check_zone(Name('example.org'), RRClass.IN, RRsetCollection(),
(lambda r: __callback(r, ref_checker), None))
self.assertEqual(orig_refcnt, sys.getrefcount(ref_checker))
@@ -132,48 +132,45 @@ class ZoneCheckerTest(unittest.TestCase):
raise FakeException('find error')
if self.__find_result is not 'use_default':
return self.__find_result
- if rrtype == RRType.SOA():
- soa = RRset(Name('example'), RRClass.IN(), rrtype,
- RRTTL(0))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ if rrtype == RRType.SOA:
+ soa = RRset(Name('example'), RRClass.IN, rrtype, RRTTL(0))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'. . 0 0 0 0 0'))
return soa
- if rrtype == RRType.NS():
- ns = RRset(Name('example'), RRClass.IN(), rrtype,
- RRTTL(0))
- ns.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
- 'example.org'))
+ if rrtype == RRType.NS:
+ ns = RRset(Name('example'), RRClass.IN, rrtype, RRTTL(0))
+ ns.add_rdata(Rdata(RRType.NS, RRClass.IN, 'example.org.'))
return ns
return None
# A successful case. Just checking it works in that case.
rrsets = FakeRRsetCollection()
- self.assertTrue(check_zone(Name('example'), RRClass.IN(), rrsets,
+ self.assertTrue(check_zone(Name('example'), RRClass.IN, rrsets,
(None, None)))
# Likewise, normal case but zone check fails.
rrsets = FakeRRsetCollection(False, None)
- self.assertFalse(check_zone(Name('example'), RRClass.IN(), rrsets,
+ self.assertFalse(check_zone(Name('example'), RRClass.IN, rrsets,
(None, None)))
# Our find() returns a bad type of result.
rrsets = FakeRRsetCollection(False, 1)
- self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN(),
+ self.assertRaises(TypeError, check_zone, Name('example'), RRClass.IN,
rrsets, (None, None))
# Our find() returns an empty SOA RRset. C++ zone checker code
# throws, which results in IscException.
rrsets = FakeRRsetCollection(False, RRset(Name('example'),
- RRClass.IN(),
- RRType.SOA(), RRTTL(0)))
+ RRClass.IN,
+ RRType.SOA, RRTTL(0)))
self.assertRaises(IscException, check_zone, Name('example'),
- RRClass.IN(), rrsets, (None, None))
+ RRClass.IN, rrsets, (None, None))
# Our find() raises an exception. That exception is propagated to
# the top level.
rrsets = FakeRRsetCollection(True)
self.assertRaises(FakeException, check_zone, Name('example'),
- RRClass.IN(), rrsets, (None, None))
+ RRClass.IN, rrsets, (None, None))
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/dns/rdata/generic/cname_5.cc b/src/lib/dns/rdata/generic/cname_5.cc
index 5bb0aea..e87eeec 100644
--- a/src/lib/dns/rdata/generic/cname_5.cc
+++ b/src/lib/dns/rdata/generic/cname_5.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid CNAME RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The CNAME must be absolute since there's no parameter that specifies
+/// the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
CNAME::CNAME(const std::string& namestr) :
- cname_(namestr)
-{}
+ // Fill in dummy name and replace it soon below.
+ cname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ cname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for CNAME: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct CNAME from '" <<
+ namestr << "': " << ex.what());
+ }
+}
CNAME::CNAME(InputBuffer& buffer, size_t) :
Rdata(), cname_(buffer)
@@ -39,6 +73,27 @@ CNAME::CNAME(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a CNAME RDATA. The CNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of CNAME when it
+/// is non-absolute.
+CNAME::CNAME(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ cname_(createNameFromLexer(lexer, origin))
+{}
+
CNAME::CNAME(const CNAME& other) :
Rdata(), cname_(other.cname_)
{}
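
(Not part of the patch.) A minimal sketch of how the stricter string
constructor behaves after this change, assuming the usual libdns++ headers
and namespaces; the DNAME, NS and PTR constructors rewritten below follow
the same pattern, so one sketch covers all four:

    // Illustrative only: exercising the lexer-based CNAME string constructor.
    #include <dns/name.h>
    #include <dns/rdata.h>
    #include <dns/rdataclass.h>
    #include <iostream>

    using namespace isc::dns;
    using namespace isc::dns::rdata;

    int main() {
        // An absolute target name is accepted, as before.
        const generic::CNAME ok("cn.example.com.");
        std::cout << ok.toText() << std::endl;

        // A relative name with no origin available is now rejected.
        try {
            const generic::CNAME bad("cn.example.com");
        } catch (const MissingNameOrigin& ex) {
            std::cout << "rejected: " << ex.what() << std::endl;
        }

        // So is trailing text after the name.
        try {
            const generic::CNAME extra("cn.example.com. extra.");
        } catch (const InvalidRdataText& ex) {
            std::cout << "rejected: " << ex.what() << std::endl;
        }
        return (0);
    }

This mirrors the new badText/createFromLexer cases added to
rdata_cname_unittest.cc further down in this commit.
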
diff --git a/src/lib/dns/rdata/generic/dname_39.cc b/src/lib/dns/rdata/generic/dname_39.cc
index a22fcc3..d1d349e 100644
--- a/src/lib/dns/rdata/generic/dname_39.cc
+++ b/src/lib/dns/rdata/generic/dname_39.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid DNAME RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The TARGET must be absolute since there's no parameter that specifies
+/// the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
DNAME::DNAME(const std::string& namestr) :
- dname_(namestr)
-{}
+ // Fill in dummy name and replace it soon below.
+ dname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ dname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for DNAME: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct DNAME from '" <<
+ namestr << "': " << ex.what());
+ }
+}
DNAME::DNAME(InputBuffer& buffer, size_t) :
dname_(buffer)
@@ -39,6 +73,27 @@ DNAME::DNAME(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a DNAME RDATA. The TARGET field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of TARGET when it
+/// is non-absolute.
+DNAME::DNAME(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ dname_(createNameFromLexer(lexer, origin))
+{}
+
DNAME::DNAME(const DNAME& other) :
Rdata(), dname_(other.dname_)
{}
diff --git a/src/lib/dns/rdata/generic/mx_15.cc b/src/lib/dns/rdata/generic/mx_15.cc
index b95ba05..12ada97 100644
--- a/src/lib/dns/rdata/generic/mx_15.cc
+++ b/src/lib/dns/rdata/generic/mx_15.cc
@@ -26,9 +26,12 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using boost::lexical_cast;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -40,21 +43,80 @@ MX::MX(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid MX RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The EXCHANGE name must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// See the constructor that takes \c MasterLexer for other fields.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
MX::MX(const std::string& mx_str) :
- preference_(0), mxname_(".")
+ // Fill in dummy name and replace them soon below.
+ preference_(0), mxname_(Name::ROOT_NAME())
{
- istringstream iss(mx_str);
- uint16_t pref;
- string mxname;
-
- iss >> pref >> mxname;
+ try {
+ std::istringstream ss(mx_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ const uint32_t num =
+ lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid MX preference in: "
+ << mx_str);
+ }
+ preference_ = static_cast<uint16_t>(num);
+
+ mxname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for MX: "
+ << mx_str);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct MX from '" <<
+ mx_str << "': " << ex.what());
+ }
+}
- if (iss.bad() || iss.fail() || !iss.eof()) {
- isc_throw(InvalidRdataText, "Invalid MX text format");
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual representation
+/// of an MX RDATA. The EXCHANGE field can be non-absolute if \c origin
+/// is non-NULL, in which case \c origin is used to make it absolute.
+/// It must not be represented as a quoted string.
+///
+/// The PREFERENCE field must be a valid decimal representation of an
+/// unsigned 16-bit integer.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of EXCHANGE when it
+/// is non-absolute.
+MX::MX(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ preference_(0), mxname_(".")
+{
+ const uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid MX preference: " << num);
}
+ preference_ = static_cast<uint16_t>(num);
- preference_ = pref;
- mxname_ = Name(mxname);
+ mxname_ = createNameFromLexer(lexer, origin);
}
MX::MX(uint16_t preference, const Name& mxname) :
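
(Also illustrative, same assumptions as the CNAME sketch above.) The MX
string constructor now lexes the PREFERENCE field and range-checks it
before building the EXCHANGE name:

    // Illustrative only: the reworked MX string constructor.
    #include <dns/rdata.h>
    #include <dns/rdataclass.h>
    #include <iostream>

    using namespace isc::dns::rdata;

    int main() {
        // Valid: a 16-bit preference plus an absolute exchange name.
        const generic::MX mx("10 mx.example.com.");
        std::cout << mx.toText() << std::endl;

        // A preference above 65535 is rejected with InvalidRdataText.
        try {
            const generic::MX bad("65536 mx.example.com.");
        } catch (const InvalidRdataText& ex) {
            std::cout << "rejected: " << ex.what() << std::endl;
        }
        return (0);
    }
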
diff --git a/src/lib/dns/rdata/generic/ns_2.cc b/src/lib/dns/rdata/generic/ns_2.cc
index 631da9d..d75ab7d 100644
--- a/src/lib/dns/rdata/generic/ns_2.cc
+++ b/src/lib/dns/rdata/generic/ns_2.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid NS RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The NSDNAME must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c
+/// MissingNameOrigin exception will be thrown. It must not be
+/// represented as a quoted string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
NS::NS(const std::string& namestr) :
- nsname_(namestr)
-{}
+    // Fill in a dummy name and replace it soon below.
+ nsname_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(namestr);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ nsname_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for NS: "
+ << namestr);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct NS from '" <<
+ namestr << "': " << ex.what());
+ }
+}
NS::NS(InputBuffer& buffer, size_t) :
nsname_(buffer)
@@ -39,6 +73,27 @@ NS::NS(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of an NS RDATA. The NSDNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of NSDNAME when it
+/// is non-absolute.
+NS::NS(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ nsname_(createNameFromLexer(lexer, origin))
+{}
+
NS::NS(const NS& other) :
Rdata(), nsname_(other.nsname_)
{}
diff --git a/src/lib/dns/rdata/generic/ptr_12.cc b/src/lib/dns/rdata/generic/ptr_12.cc
index b76fc7f..080faee 100644
--- a/src/lib/dns/rdata/generic/ptr_12.cc
+++ b/src/lib/dns/rdata/generic/ptr_12.cc
@@ -22,15 +22,49 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
+/// \brief Constructor from string.
+///
+/// The given string must represent a valid PTR RDATA. There can be
+/// extra space characters at the beginning or end of the text (which
+/// are simply ignored), but other extra text, including a new line,
+/// will make the construction fail with an exception.
+///
+/// The PTRDNAME must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c
+/// MissingNameOrigin exception will be thrown. It must not be
+/// represented as a quoted string.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
PTR::PTR(const std::string& type_str) :
- ptr_name_(type_str)
-{}
+    // Fill in a dummy name and replace it soon below.
+ ptr_name_(Name::ROOT_NAME())
+{
+ try {
+ std::istringstream ss(type_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ ptr_name_ = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for PTR: "
+ << type_str);
+ }
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct PTR from '" <<
+ type_str << "': " << ex.what());
+ }
+}
PTR::PTR(InputBuffer& buffer, size_t) :
ptr_name_(buffer)
@@ -39,6 +73,27 @@ PTR::PTR(InputBuffer& buffer, size_t) :
// check consistency.
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual
+/// representation of a PTR RDATA. The PTRDNAME field can be
+/// non-absolute if \c origin is non-NULL, in which case \c origin is
+/// used to make it absolute. It must not be represented as a quoted
+/// string.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of PTRDNAME when it
+/// is non-absolute.
+PTR::PTR(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&) :
+ ptr_name_(createNameFromLexer(lexer, origin))
+{}
+
PTR::PTR(const PTR& source) :
Rdata(), ptr_name_(source.ptr_name_)
{}
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
index af8bbe3..ac62071 100644
--- a/src/lib/dns/rdata/in_1/srv_33.cc
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -25,9 +25,12 @@
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/rdata/generic/detail/lexer_util.h>
+
using namespace std;
using namespace isc::util;
using namespace isc::util::str;
+using isc::dns::rdata::generic::detail::createNameFromLexer;
// BEGIN_ISC_NAMESPACE
// BEGIN_RDATA_NAMESPACE
@@ -48,45 +51,57 @@ struct SRVImpl {
/// \brief Constructor from string.
///
-/// \c srv_str must be formatted as follows:
-/// \code <Priority> <Weight> <Port> <Target>
-/// \endcode
-/// where
-/// - <Priority>, <Weight>, and <Port> are an unsigned
-/// 16-bit decimal integer.
-/// - <Target> is a valid textual representation of domain name.
-///
-/// An example of valid string is:
-/// \code "1 5 1500 example.com." \endcode
-///
-/// <b>Exceptions</b>
-///
-/// If <Target> is not a valid domain name, a corresponding exception
-/// from the \c Name class will be thrown;
-/// if %any of the other bullet points above is not met, an exception of
-/// class \c InvalidRdataText will be thrown.
-/// This constructor internally involves resource allocation, and if it fails
-/// a corresponding standard exception will be thrown.
+/// The given string must represent a valid SRV RDATA. There can be extra
+/// space characters at the beginning or end of the text (which are simply
+/// ignored), but other extra text, including a new line, will make the
+/// construction fail with an exception.
+///
+/// The TARGET name must be absolute since there's no parameter that
+/// specifies the origin name; if it is not absolute, \c MissingNameOrigin
+/// exception will be thrown. It must not be represented as a quoted
+/// string.
+///
+/// See the constructor that takes \c MasterLexer for other fields.
+///
+/// \throw Others Exception from the Name and RRTTL constructors.
+/// \throw InvalidRdataText Other general syntax errors.
SRV::SRV(const std::string& srv_str) :
impl_(NULL)
{
- istringstream iss(srv_str);
-
try {
- const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
- const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
- const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
- const Name targetname(getToken(iss));
-
- if (!iss.eof()) {
- isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
- srv_str);
+ std::istringstream ss(srv_str);
+ MasterLexer lexer;
+ lexer.pushSource(ss);
+
+ uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV priority in: " << srv_str);
+ }
+ const uint16_t priority = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV weight in: " << srv_str);
+ }
+ const uint16_t weight = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV port in: " << srv_str);
+ }
+ const uint16_t port = static_cast<uint16_t>(num);
+
+ const Name targetname = createNameFromLexer(lexer, NULL);
+
+ if (lexer.getNextToken().getType() != MasterToken::END_OF_FILE) {
+ isc_throw(InvalidRdataText, "extra input text for SRV: "
+ << srv_str);
}
impl_ = new SRVImpl(priority, weight, port, targetname);
- } catch (const StringTokenError& ste) {
- isc_throw(InvalidRdataText, "Invalid SRV text: " <<
- ste.what() << ": " << srv_str);
+ } catch (const MasterLexer::LexerError& ex) {
+ isc_throw(InvalidRdataText, "Failed to construct SRV from '" <<
+ srv_str << "': " << ex.what());
}
}
@@ -112,14 +127,58 @@ SRV::SRV(InputBuffer& buffer, size_t rdata_len) {
isc_throw(InvalidRdataLength, "SRV too short");
}
- uint16_t priority = buffer.readUint16();
- uint16_t weight = buffer.readUint16();
- uint16_t port = buffer.readUint16();
+ const uint16_t priority = buffer.readUint16();
+ const uint16_t weight = buffer.readUint16();
+ const uint16_t port = buffer.readUint16();
const Name targetname(buffer);
impl_ = new SRVImpl(priority, weight, port, targetname);
}
+/// \brief Constructor with a context of MasterLexer.
+///
+/// The \c lexer should point to the beginning of valid textual representation
+/// of an SRV RDATA. The TARGET field can be non-absolute if \c origin
+/// is non-NULL, in which case \c origin is used to make it absolute.
+/// It must not be represented as a quoted string.
+///
+/// The PRIORITY, WEIGHT and PORT fields must each be a valid decimal
+/// representation of an unsigned 16-bit integer.
+///
+/// \throw MasterLexer::LexerError General parsing error such as missing field.
+/// \throw Other Exceptions from the Name and RRTTL constructors if
+///     construction of these objects from the textual fields fails.
+///
+/// \param lexer A \c MasterLexer object parsing a master file for the
+/// RDATA to be created
+/// \param origin If non NULL, specifies the origin of TARGET when it
+/// is non-absolute.
+SRV::SRV(MasterLexer& lexer, const Name* origin,
+ MasterLoader::Options, MasterLoaderCallbacks&)
+{
+ uint32_t num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV priority: " << num);
+ }
+ const uint16_t priority = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV weight: " << num);
+ }
+ const uint16_t weight = static_cast<uint16_t>(num);
+
+ num = lexer.getNextToken(MasterToken::NUMBER).getNumber();
+ if (num > 65535) {
+ isc_throw(InvalidRdataText, "Invalid SRV port: " << num);
+ }
+ const uint16_t port = static_cast<uint16_t>(num);
+
+ const Name targetname = createNameFromLexer(lexer, origin);
+
+ impl_ = new SRVImpl(priority, weight, port, targetname);
+}
+
/// \brief The copy constructor.
///
/// It internally allocates a resource, and if it fails a corresponding
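
(Illustrative only, same assumptions as above.) For SRV the same pattern
applies to all three numeric fields, reusing the example string from the
documentation this patch replaces:

    // Illustrative only: SRV text parsing after the lexer rework.
    #include <dns/rdata.h>
    #include <dns/rdataclass.h>
    #include <iostream>

    using namespace isc::dns::rdata;

    int main() {
        // PRIORITY=1, WEIGHT=5, PORT=1500, absolute TARGET.
        const in::SRV srv("1 5 1500 example.com.");
        std::cout << srv.toText() << std::endl;

        // Any of PRIORITY, WEIGHT or PORT above 65535 is rejected.
        try {
            const in::SRV bad("1 5 65536 example.com.");
        } catch (const InvalidRdataText& ex) {
            std::cout << "rejected: " << ex.what() << std::endl;
        }
        return (0);
    }
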
diff --git a/src/lib/dns/rdata/template.h b/src/lib/dns/rdata/template.h
index 9e84cc3..3bfeb85 100644
--- a/src/lib/dns/rdata/template.h
+++ b/src/lib/dns/rdata/template.h
@@ -39,6 +39,10 @@
// Note: do not remove the comment lines beginning with "BEGIN_" and "END_".
// These are markers used by a script for auto-generating build-able source
// files.
+//
+// After implementing a new type of Rdata, remove the corresponding
+// entry from the meta_types dictionary of gen-rdatacode.py.in. Otherwise
+// the build will fail.
class MyType : public Rdata {
public:
diff --git a/src/lib/dns/rrclass-placeholder.h b/src/lib/dns/rrclass-placeholder.h
index 1ff4163..89dc49d 100644
--- a/src/lib/dns/rrclass-placeholder.h
+++ b/src/lib/dns/rrclass-placeholder.h
@@ -294,28 +294,14 @@ public:
// BEGIN_WELL_KNOWN_CLASS_DECLARATIONS
// END_WELL_KNOWN_CLASS_DECLARATIONS
-
- static const RRClass& NONE();
private:
- // \brief Meta-classes
- enum {
- RRCLASS_RESERVED0 = 0,
- RRCLASS_NONE = 254
- };
uint16_t classcode_;
};
// BEGIN_WELL_KNOWN_CLASS_DEFINITIONS
// END_WELL_KNOWN_CLASS_DEFINITIONS
-inline const RRClass&
-RRClass::NONE() {
- static RRClass rrclass(RRCLASS_NONE);
-
- return (rrclass);
-}
-
///
/// \brief Insert the \c RRClass as a string into stream.
///
diff --git a/src/lib/dns/rrset_collection_base.h b/src/lib/dns/rrset_collection_base.h
index 7ccf7b5..f7c9b6b 100644
--- a/src/lib/dns/rrset_collection_base.h
+++ b/src/lib/dns/rrset_collection_base.h
@@ -27,7 +27,7 @@ namespace dns {
/// \brief Error during RRsetCollectionBase find() operation
///
-/// This exception is thrown when an calling implementation of
+/// This exception is thrown when calling an implementation of
/// \c RRsetCollectionBase::find() results in an error which is not due
/// to unmatched data, but because of some other underlying error
/// condition.
@@ -58,13 +58,37 @@ public:
/// is found, \c NULL is returned.
///
/// This method's implementations currently are not specified to
- /// handle \c RRTypes such as RRSIG and NSEC3. RRSIGs are attached
- /// to their corresponding \c RRset and it is not straightforward to
- /// search for them. Searching for RRSIGs will return \c false
- /// always. Support for RRSIGs may be added in the future.
+ /// handle \c RRTypes such as RRSIG and NSEC3. This interface may be
+ /// refined to clarify this point in the future, and perhaps, provide
+ /// additional API for these RRType.
///
- /// Non-concrete types such as ANY and AXFR are unsupported and will
- /// return \c false always.
+ /// As for RRSIG, there are some fundamental open questions. For
+ /// example, it's not clear whether we want to return all RRSIGs of
+ /// the given name covering any RR types (in which case, we need to
+    /// figure out how), or whether we need to extend the interface so we can
+ /// specify the covered type. A specific derived implementation may
+ /// return something if type RRSIG is specified, but this is not
+ /// specified here at the base class level. So, for RRSIGs the
+ /// behavior should be assumed as undefined.
+ ///
+    /// As for NSEC3, it's not clear whether owner names (which include
+ /// hashed labels) are the best choice of search key, because in many
+ /// cases, what the application wants to find is an NSEC3 that has the
+ /// hash of some particular "normal" domain names. Also, if the underlying
+ /// implementation encapsulates a single zone, NSEC3 records conceptually
+ /// belong to a separate name space, which may cause implementation
+ /// difficulty.
+ ///
+    /// Behavior with meta types such as ANY and AXFR is also
+ /// undefined. A specific implementation may return something for
+ /// these. But, unlike the case of RRSIGs, these types of RRsets
+ /// are not expected to be added to any implementation of
+ /// collection in the first place (by the definition of "meta
+ /// types"), so querying for such types is basically an invalid
+ /// operation. The API doesn't require implementations to check
+ /// this condition and reject it, so the behavior is
+ /// undefined. This interface will not be refined in future
+ /// versions for these meta types.
///
/// \throw RRsetCollectionError if find() results in some
/// implementation-specific error.
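
(Illustrative only; this assumes the concrete isc::dns::RRsetCollection
with its istream-based constructor, which is not shown in this diff.) A
small sketch of the find() contract described above: concrete type lookups
work, while RRSIG, NSEC3 and meta types such as ANY are left undefined at
this base-class level:

    // Illustrative only: RRsetCollection::find() on a concrete type.
    #include <dns/rrset_collection.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>
    #include <dns/rrtype.h>
    #include <dns/rrset.h>
    #include <iostream>
    #include <sstream>

    using namespace isc::dns;

    int main() {
        std::istringstream zone("www.example.org. 3600 IN A 192.0.2.1\n");
        RRsetCollection rrsets(zone, Name("example.org"), RRClass::IN());

        // find(name, class, type) returns the matching RRset or a null pointer.
        ConstRRsetPtr found = rrsets.find(Name("www.example.org"),
                                          RRClass::IN(), RRType::A());
        if (found) {
            std::cout << found->toText();
        }
        return (0);
    }
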
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 273a486..5541635 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -262,43 +262,13 @@ public:
// BEGIN_WELL_KNOWN_TYPE_DECLARATIONS
// END_WELL_KNOWN_TYPE_DECLARATIONS
- static const RRType& IXFR();
- static const RRType& AXFR();
- static const RRType& ANY();
-
private:
- // \brief Meta-classes
- // XXX: these should be implemented using rrparamregistry
- enum {
- RRTYPE_IXFR = 251,
- RRTYPE_AXFR = 252,
- RRTYPE_ANY = 255
- };
-
uint16_t typecode_;
};
// BEGIN_WELL_KNOWN_TYPE_DEFINITIONS
// END_WELL_KNOWN_TYPE_DEFINITIONS
-inline const RRType&
-RRType::IXFR() {
- static RRType rrtype(RRTYPE_IXFR);
- return (rrtype);
-}
-
-inline const RRType&
-RRType::AXFR() {
- static RRType rrtype(RRTYPE_AXFR);
- return (rrtype);
-}
-
-inline const RRType&
-RRType::ANY() {
- static RRType rrtype(RRTYPE_ANY);
- return (rrtype);
-}
-
///
/// \brief Insert the \c RRType as a string into stream.
///
diff --git a/src/lib/dns/tests/rdata_cname_unittest.cc b/src/lib/dns/tests/rdata_cname_unittest.cc
index 6451f72..5f602f0 100644
--- a/src/lib/dns/tests/rdata_cname_unittest.cc
+++ b/src/lib/dns/tests/rdata_cname_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_CNAME_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_CNAME_Test() :
+ rdata_cname("cn.example.com."),
+ rdata_cname2("cn2.example.com.")
+ {}
+
+ const generic::CNAME rdata_cname;
+ const generic::CNAME rdata_cname2;
};
-const generic::CNAME rdata_cname("cn.example.com");
-const generic::CNAME rdata_cname2("cn2.example.com");
const uint8_t wiredata_cname[] = {
0x02, 0x63, 0x6e, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -50,16 +55,21 @@ const uint8_t wiredata_cname2[] = {
0x03, 0x63, 0x6e, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_CNAME_Test, createFromText) {
- EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com")));
+ EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("cn.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("CN.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_cname.compare(generic::CNAME("CN.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_cname.compare(*createRdata(RRType("CNAME"),
RRClass(65000),
- "cn.example.com")));
+ "cn.example.com.")));
+}
+
+TEST_F(Rdata_CNAME_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::CNAME("cname.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_CNAME_Test, createFromWire) {
@@ -79,7 +89,7 @@ TEST_F(Rdata_CNAME_Test, createFromWire) {
"rdata_cname_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::CNAME("cn2.example.com").compare(
+ EXPECT_EQ(0, generic::CNAME("cn2.example.com.").compare(
*rdataFactoryFromFile(RRType("CNAME"), RRClass("IN"),
"rdata_cname_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("CNAME"), RRClass("IN"),
@@ -90,7 +100,17 @@ TEST_F(Rdata_CNAME_Test, createFromWire) {
TEST_F(Rdata_CNAME_Test, createFromLexer) {
EXPECT_EQ(0, rdata_cname.compare(
*test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
- "cn.example.com")));
+ "cn.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::CNAME("cname10.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
+ "cname10")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::CNAME(), RRClass::IN(),
+ "cname.example.com. extra."));
}
TEST_F(Rdata_CNAME_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rdata_dname_unittest.cc b/src/lib/dns/tests/rdata_dname_unittest.cc
index c4e517c..7209e36 100644
--- a/src/lib/dns/tests/rdata_dname_unittest.cc
+++ b/src/lib/dns/tests/rdata_dname_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_DNAME_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_DNAME_Test() :
+ rdata_dname("dn.example.com."),
+ rdata_dname2("dn2.example.com.")
+ {}
+
+ const generic::DNAME rdata_dname;
+ const generic::DNAME rdata_dname2;
};
-const generic::DNAME rdata_dname("dn.example.com");
-const generic::DNAME rdata_dname2("dn2.example.com");
const uint8_t wiredata_dname[] = {
0x02, 0x64, 0x6e, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -52,16 +57,21 @@ const uint8_t wiredata_dname2[] = {
0x63, 0x6f, 0x6d, 0x00 };
TEST_F(Rdata_DNAME_Test, createFromText) {
- EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com")));
+ EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("dn.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("DN.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_dname.compare(generic::DNAME("DN.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_dname.compare(*createRdata(RRType("DNAME"),
RRClass(65000),
- "dn.example.com")));
+ "dn.example.com.")));
+}
+
+TEST_F(Rdata_DNAME_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::DNAME("dname.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_DNAME_Test, createFromWire) {
@@ -81,7 +91,7 @@ TEST_F(Rdata_DNAME_Test, createFromWire) {
"rdata_dname_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::DNAME("dn2.example.com").compare(
+ EXPECT_EQ(0, generic::DNAME("dn2.example.com.").compare(
*rdataFactoryFromFile(RRType("DNAME"), RRClass("IN"),
"rdata_dname_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("DNAME"), RRClass("IN"),
@@ -92,7 +102,17 @@ TEST_F(Rdata_DNAME_Test, createFromWire) {
TEST_F(Rdata_DNAME_Test, createFromLexer) {
EXPECT_EQ(0, rdata_dname.compare(
*test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
- "dn.example.com")));
+ "dn.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::DNAME("dname8.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
+ "dname8")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::DNAME(), RRClass::IN(),
+ "dname.example.com. extra."));
}
TEST_F(Rdata_DNAME_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rdata_mx_unittest.cc b/src/lib/dns/tests/rdata_mx_unittest.cc
index 6c6039a..6e4eaba 100644
--- a/src/lib/dns/tests/rdata_mx_unittest.cc
+++ b/src/lib/dns/tests/rdata_mx_unittest.cc
@@ -32,13 +32,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_MX_Test : public RdataTest {
- // there's nothing to specialize
-};
+public:
+ Rdata_MX_Test() :
+ rdata_mx(10, Name("mx.example.com"))
+ {}
-const generic::MX rdata_mx(10, Name("mx.example.com"));
+ const generic::MX rdata_mx;
+};
TEST_F(Rdata_MX_Test, createFromText) {
- const generic::MX rdata_mx2("10 mx.example.com");
+ const generic::MX rdata_mx2("10 mx.example.com.");
EXPECT_EQ(0, rdata_mx2.compare(rdata_mx));
}
@@ -48,6 +51,12 @@ TEST_F(Rdata_MX_Test, badText) {
EXPECT_THROW(const generic::MX rdata_mx("SPOON"), InvalidRdataText);
EXPECT_THROW(const generic::MX rdata_mx("10 mx. example.com."),
InvalidRdataText);
+ // No origin and relative
+ EXPECT_THROW(const generic::MX rdata_mx("10 mx.example.com"),
+ MissingNameOrigin);
+ // Extra text at end of line
+ EXPECT_THROW(const generic::MX rdata_mx("10 mx.example.com. extra."),
+ InvalidRdataText);
}
TEST_F(Rdata_MX_Test, copy) {
@@ -65,11 +74,25 @@ TEST_F(Rdata_MX_Test, createFromWire) {
TEST_F(Rdata_MX_Test, createFromLexer) {
EXPECT_EQ(0, rdata_mx.compare(
*test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
- "10 mx.example.com")));
+ "10 mx.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::MX("10 mx2.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "10 mx2")));
// Exceptions cause NULL to be returned.
EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
- "10 mx. example.com"));
+ "10 mx. example.com."));
+
+ // 65536 is larger than maximum possible preference
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "65536 mx.example.com."));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::MX(), RRClass::IN(),
+ "10 mx.example.com. extra."));
}
TEST_F(Rdata_MX_Test, toWireRenderer) {
diff --git a/src/lib/dns/tests/rdata_ns_unittest.cc b/src/lib/dns/tests/rdata_ns_unittest.cc
index d536393..53eb670 100644
--- a/src/lib/dns/tests/rdata_ns_unittest.cc
+++ b/src/lib/dns/tests/rdata_ns_unittest.cc
@@ -33,11 +33,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_NS_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_NS_Test() :
+ rdata_ns("ns.example.com."),
+ rdata_ns2("ns2.example.com.")
+ {}
+
+ const generic::NS rdata_ns;
+ const generic::NS rdata_ns2;
};
-const generic::NS rdata_ns("ns.example.com");
-const generic::NS rdata_ns2("ns2.example.com");
const uint8_t wiredata_ns[] = {
0x02, 0x6e, 0x73, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -50,15 +55,20 @@ const uint8_t wiredata_ns2[] = {
0x03, 0x6e, 0x73, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_NS_Test, createFromText) {
- EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com")));
+ EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_ns.compare(generic::NS("ns.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_ns.compare(generic::NS("NS.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_ns.compare(generic::NS("NS.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_ns.compare(*createRdata(RRType("NS"), RRClass(65000),
- "ns.example.com")));
+ "ns.example.com.")));
+}
+
+TEST_F(Rdata_NS_Test, badText) {
+ // Extra input at end of line
+ EXPECT_THROW(generic::NS("ns.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_NS_Test, createFromWire) {
@@ -78,7 +88,7 @@ TEST_F(Rdata_NS_Test, createFromWire) {
"rdata_ns_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::NS("ns2.example.com").compare(
+ EXPECT_EQ(0, generic::NS("ns2.example.com.").compare(
*rdataFactoryFromFile(RRType("NS"), RRClass("IN"),
"rdata_ns_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("NS"), RRClass("IN"),
@@ -89,11 +99,21 @@ TEST_F(Rdata_NS_Test, createFromWire) {
TEST_F(Rdata_NS_Test, createFromLexer) {
EXPECT_EQ(0, rdata_ns.compare(
*test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
- "ns.example.com")));
+ "ns.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::NS("ns8.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
+ "ns8")));
// Exceptions cause NULL to be returned.
EXPECT_FALSE(test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
""));
+
+ // Extra input at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::NS(), RRClass::IN(),
+ "ns.example.com. extra."));
}
TEST_F(Rdata_NS_Test, toWireBuffer) {
@@ -119,13 +139,13 @@ TEST_F(Rdata_NS_Test, toText) {
}
TEST_F(Rdata_NS_Test, compare) {
- generic::NS small("a.example");
- generic::NS large("example");
+ generic::NS small("a.example.");
+ generic::NS large("example.");
EXPECT_TRUE(Name("a.example") > Name("example"));
EXPECT_GT(0, small.compare(large));
}
TEST_F(Rdata_NS_Test, getNSName) {
- EXPECT_EQ(Name("ns.example.com"), rdata_ns.getNSName());
+ EXPECT_EQ(Name("ns.example.com."), rdata_ns.getNSName());
}
}
diff --git a/src/lib/dns/tests/rdata_ptr_unittest.cc b/src/lib/dns/tests/rdata_ptr_unittest.cc
index 44b849a..5d6d37d 100644
--- a/src/lib/dns/tests/rdata_ptr_unittest.cc
+++ b/src/lib/dns/tests/rdata_ptr_unittest.cc
@@ -37,11 +37,16 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_PTR_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_PTR_Test() :
+ rdata_ptr("ns.example.com."),
+ rdata_ptr2("ns2.example.com.")
+ {}
+
+ const generic::PTR rdata_ptr;
+ const generic::PTR rdata_ptr2;
};
-const generic::PTR rdata_ptr("ns.example.com");
-const generic::PTR rdata_ptr2("ns2.example.com");
const uint8_t wiredata_ptr[] = {
0x02, 0x6e, 0x73, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03,
0x63, 0x6f, 0x6d, 0x00 };
@@ -54,15 +59,20 @@ const uint8_t wiredata_ptr2[] = {
0x03, 0x6e, 0x73, 0x32, 0xc0, 0x03 };
TEST_F(Rdata_PTR_Test, createFromText) {
- EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com")));
+ EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com.")));
// explicitly add a trailing dot. should be the same RDATA.
EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("ns.example.com.")));
// should be case sensitive.
- EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("NS.EXAMPLE.COM")));
+ EXPECT_EQ(0, rdata_ptr.compare(generic::PTR("NS.EXAMPLE.COM.")));
// RDATA of a class-independent type should be recognized for any
// "unknown" class.
EXPECT_EQ(0, rdata_ptr.compare(*createRdata(RRType("PTR"), RRClass(65000),
- "ns.example.com")));
+ "ns.example.com.")));
+}
+
+TEST_F(Rdata_PTR_Test, badText) {
+ // Extra text at end of line
+ EXPECT_THROW(generic::PTR("foo.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_PTR_Test, createFromWire) {
@@ -82,7 +92,7 @@ TEST_F(Rdata_PTR_Test, createFromWire) {
"rdata_ns_fromWire", 71),
DNSMessageFORMERR);
- EXPECT_EQ(0, generic::PTR("ns2.example.com").compare(
+ EXPECT_EQ(0, generic::PTR("ns2.example.com.").compare(
*rdataFactoryFromFile(RRType("PTR"), RRClass("IN"),
"rdata_ns_fromWire", 55)));
EXPECT_THROW(*rdataFactoryFromFile(RRType("PTR"), RRClass("IN"),
@@ -93,7 +103,17 @@ TEST_F(Rdata_PTR_Test, createFromWire) {
TEST_F(Rdata_PTR_Test, createFromLexer) {
EXPECT_EQ(0, rdata_ptr.compare(
*test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
- "ns.example.com")));
+ "ns.example.com.")));
+
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, generic::PTR("foo0.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
+ "foo0")));
+
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::PTR(), RRClass::IN(),
+ "foo.example.com. extra."));
}
TEST_F(Rdata_PTR_Test, toWireBuffer) {
@@ -119,8 +139,8 @@ TEST_F(Rdata_PTR_Test, toText) {
}
TEST_F(Rdata_PTR_Test, compare) {
- generic::PTR small("a.example");
- generic::PTR large("example");
+ generic::PTR small("a.example.");
+ generic::PTR large("example.");
EXPECT_TRUE(Name("a.example") > Name("example"));
EXPECT_GT(0, small.compare(large));
}
diff --git a/src/lib/dns/tests/rdata_srv_unittest.cc b/src/lib/dns/tests/rdata_srv_unittest.cc
index 066755f..6ca0c7f 100644
--- a/src/lib/dns/tests/rdata_srv_unittest.cc
+++ b/src/lib/dns/tests/rdata_srv_unittest.cc
@@ -33,14 +33,23 @@ using namespace isc::dns::rdata;
namespace {
class Rdata_SRV_Test : public RdataTest {
- // there's nothing to specialize
+public:
+ Rdata_SRV_Test() :
+ srv_txt("1 5 1500 a.example.com."),
+ srv_txt2("1 5 1400 example.com."),
+ too_long_label("012345678901234567890123456789"
+ "0123456789012345678901234567890123."),
+ rdata_srv(srv_txt),
+ rdata_srv2(srv_txt2)
+ {}
+
+ const string srv_txt;
+ const string srv_txt2;
+ const string too_long_label;
+ const in::SRV rdata_srv;
+ const in::SRV rdata_srv2;
};
-string srv_txt("1 5 1500 a.example.com.");
-string srv_txt2("1 5 1400 example.com.");
-string too_long_label("012345678901234567890123456789"
- "0123456789012345678901234567890123");
-
// 1 5 1500 a.example.com.
const uint8_t wiredata_srv[] = {
0x00, 0x01, 0x00, 0x05, 0x05, 0xdc, 0x01, 0x61, 0x07, 0x65, 0x78,
@@ -50,9 +59,6 @@ const uint8_t wiredata_srv2[] = {
0x00, 0x01, 0x00, 0x05, 0x05, 0x78, 0x07, 0x65, 0x78, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
-const in::SRV rdata_srv(srv_txt);
-const in::SRV rdata_srv2(srv_txt2);
-
TEST_F(Rdata_SRV_Test, createFromText) {
EXPECT_EQ(1, rdata_srv.getPriority());
EXPECT_EQ(5, rdata_srv.getWeight());
@@ -78,6 +84,8 @@ TEST_F(Rdata_SRV_Test, badText) {
// bad name
EXPECT_THROW(in::SRV("1 5 1500 a.example.com." + too_long_label),
TooLongLabel);
+ // Extra text at end of line
+ EXPECT_THROW(in::SRV("1 5 1500 a.example.com. extra."), InvalidRdataText);
}
TEST_F(Rdata_SRV_Test, assignment) {
@@ -124,10 +132,29 @@ TEST_F(Rdata_SRV_Test, createFromLexer) {
*test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
"1 5 1500 a.example.com.")));
+ // test::createRdataUsingLexer() constructs relative to
+ // "example.org." origin.
+ EXPECT_EQ(0, in::SRV("1 5 1500 server16.example.org.").compare(
+ *test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 5 1500 server16")));
+
// Exceptions cause NULL to be returned.
+
+ // Bad priority
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "65536 5 1500 "
+ "a.example.com."));
+ // Bad weight
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 65536 1500 "
+ "a.example.com."));
+ // Bad port
EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
"1 5 281474976710656 "
"a.example.com."));
+ // Extra text at end of line
+ EXPECT_FALSE(test::createRdataUsingLexer(RRType::SRV(), RRClass::IN(),
+ "1 5 1500 a.example.com. extra."));
}
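
Besides the name handling, the new SRV lexer cases reject any of the three 16-bit fields (priority, weight, port) whose textual value does not fit in 16 bits. A minimal sketch of such a bounds check, independent of the actual MasterLexer number handling:

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Reject values such as "65536" instead of silently truncating them.
    uint16_t parseUint16Field(const std::string& token) {
        const unsigned long value = std::stoul(token);   // throws for non-numbers
        if (value > 65535) {
            throw std::out_of_range("16-bit field out of range: " + token);
        }
        return static_cast<uint16_t>(value);
    }
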
TEST_F(Rdata_SRV_Test, toWireBuffer) {
diff --git a/src/lib/dns/tests/rrclass_unittest.cc b/src/lib/dns/tests/rrclass_unittest.cc
index 11f1c54..17af873 100644
--- a/src/lib/dns/tests/rrclass_unittest.cc
+++ b/src/lib/dns/tests/rrclass_unittest.cc
@@ -148,4 +148,29 @@ TEST_F(RRClassTest, LeftShiftOperator) {
oss << RRClass::IN();
EXPECT_EQ(RRClass::IN().toText(), oss.str());
}
+
+// Below, we'll check definitions for all well-known RR classes; whether they
+// are defined and have the correct parameter values. Test data are generated
+// from the list available at:
+// http://www.iana.org/assignments/dns-parameters/dns-parameters.xml
+struct ClassParam {
+ const char* const txt; // "IN", "CH", etc
+ const uint16_t code; // 1, 3,
+ const RRClass& (*obj)(); // RRClass::IN(), etc
+} known_classes[] = {
+ {"IN", 1, RRClass::IN}, {"CH", 3, RRClass::CH}, {"HS", 4, RRClass::HS},
+ {"NONE", 254, RRClass::NONE}, {"ANY", 255, RRClass::ANY},
+ {NULL, 0, NULL}
+};
+
+TEST(RRClassConstTest, wellKnowns) {
+ for (int i = 0; known_classes[i].txt; ++i) {
+ SCOPED_TRACE("Checking well known RRClass: " +
+ string(known_classes[i].txt));
+ EXPECT_EQ(known_classes[i].code,
+ RRClass(known_classes[i].txt).getCode());
+ EXPECT_EQ(known_classes[i].code,
+ (*known_classes[i].obj)().getCode());
+ }
+}
}
diff --git a/src/lib/dns/tests/rrset_unittest.cc b/src/lib/dns/tests/rrset_unittest.cc
index 725eea7..d16ce3c 100644
--- a/src/lib/dns/tests/rrset_unittest.cc
+++ b/src/lib/dns/tests/rrset_unittest.cc
@@ -168,7 +168,7 @@ TEST_F(RRsetTest, addRdataPtr) {
// Pointer version of addRdata() doesn't type check and does allow to
//add a different type of Rdata as a result.
rrset_a_empty.addRdata(createRdata(RRType::NS(), RRClass::IN(),
- "ns.example.com"));
+ "ns.example.com."));
EXPECT_EQ(3, rrset_a_empty.getRdataCount());
}
@@ -205,7 +205,7 @@ TEST_F(RRsetTest, toText) {
// Unless it is type ANY or NONE
EXPECT_EQ("test.example.com. 3600 ANY A\n",
rrset_any_a_empty.toText());
- EXPECT_EQ("test.example.com. 3600 CLASS254 A\n",
+ EXPECT_EQ("test.example.com. 3600 NONE A\n",
rrset_none_a_empty.toText());
}
diff --git a/src/lib/dns/tests/rrtype_unittest.cc b/src/lib/dns/tests/rrtype_unittest.cc
index 28ecee6..ee302a1 100644
--- a/src/lib/dns/tests/rrtype_unittest.cc
+++ b/src/lib/dns/tests/rrtype_unittest.cc
@@ -145,4 +145,57 @@ TEST_F(RRTypeTest, LeftShiftOperator) {
oss << RRType::A();
EXPECT_EQ(RRType::A().toText(), oss.str());
}
+
+// Below, we'll check definitions for all well-known RR types; whether they
+// are defined and have the correct parameter values. Test data are generated
+// from the list available at:
+// http://www.iana.org/assignments/dns-parameters/dns-parameters.xml
+struct TypeParam {
+ const char* const txt; // "A", "AAAA", "NS", etc
+ const uint16_t code; // 1, 28, 2, etc
+ const RRType& (*obj)(); // RRType::A(), etc
+} known_types[] = {
+ {"A", 1, RRType::A}, {"NS", 2, RRType::NS}, {"MD", 3, RRType::MD},
+ {"MF", 4, RRType::MF}, {"CNAME", 5, RRType::CNAME},
+ {"SOA", 6, RRType::SOA}, {"MB", 7, RRType::MB}, {"MG", 8, RRType::MG},
+ {"MR", 9, RRType::MR}, {"NULL", 10, RRType::Null},
+ {"WKS", 11, RRType::WKS}, {"PTR", 12, RRType::PTR},
+ {"HINFO", 13, RRType::HINFO}, {"MINFO", 14, RRType::MINFO},
+ {"MX", 15, RRType::MX}, {"TXT", 16, RRType::TXT}, {"RP", 17, RRType::RP},
+ {"AFSDB", 18, RRType::AFSDB}, {"X25", 19, RRType::X25},
+ {"ISDN", 20, RRType::ISDN}, {"RT", 21, RRType::RT},
+ {"NSAP", 22, RRType::NSAP}, {"NSAP-PTR", 23, RRType::NSAP_PTR},
+ {"SIG", 24, RRType::SIG}, {"KEY", 25, RRType::KEY},
+ {"PX", 26, RRType::PX}, {"GPOS", 27, RRType::GPOS},
+ {"AAAA", 28, RRType::AAAA}, {"LOC", 29, RRType::LOC},
+ {"NXT", 30, RRType::NXT}, {"SRV", 33, RRType::SRV},
+ {"NAPTR", 35, RRType::NAPTR}, {"KX", 36, RRType::KX},
+ {"CERT", 37, RRType::CERT}, {"A6", 38, RRType::A6},
+ {"DNAME", 39, RRType::DNAME}, {"OPT", 41, RRType::OPT},
+ {"APL", 42, RRType::APL}, {"DS", 43, RRType::DS},
+ {"SSHFP", 44, RRType::SSHFP}, {"IPSECKEY", 45, RRType::IPSECKEY},
+ {"RRSIG", 46, RRType::RRSIG}, {"NSEC", 47, RRType::NSEC},
+ {"DNSKEY", 48, RRType::DNSKEY}, {"DHCID", 49, RRType::DHCID},
+ {"NSEC3", 50, RRType::NSEC3}, {"NSEC3PARAM", 51, RRType::NSEC3PARAM},
+ {"TLSA", 52, RRType::TLSA}, {"HIP", 55, RRType::HIP},
+ {"SPF", 99, RRType::SPF}, {"UNSPEC", 103, RRType::UNSPEC},
+ {"NID", 104, RRType::NID}, {"L32", 105, RRType::L32},
+ {"L64", 106, RRType::L64}, {"LP", 107, RRType::LP},
+ {"TKEY", 249, RRType::TKEY}, {"TSIG", 250, RRType::TSIG},
+ {"IXFR", 251, RRType::IXFR}, {"AXFR", 252, RRType::AXFR},
+ {"MAILB", 253, RRType::MAILB}, {"MAILA", 254, RRType::MAILA},
+ {"ANY", 255, RRType::ANY}, {"URI", 256, RRType::URI},
+ {"CAA", 257, RRType::CAA}, {"DLV", 32769, RRType::DLV},
+ {NULL, 0, NULL}
+};
+
+TEST(RRTypeConstTest, wellKnowns) {
+ for (int i = 0; known_types[i].txt; ++i) {
+ SCOPED_TRACE("Checking well known RRType: " +
+ string(known_types[i].txt));
+ EXPECT_EQ(known_types[i].code, RRType(known_types[i].txt).getCode());
+ EXPECT_EQ(known_types[i].code,
+ (*known_types[i].obj)().getCode());
+ }
+}
}
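
One detail worth noting in the table above: the entry for type code 10 refers to RRType::Null rather than an RRType::NULL() method, presumably because NULL is already taken by the C macro. A hypothetical check that the mnemonic and the class-level object agree (the header path is an assumption):

    #include <dns/rrtype.h>

    bool null_type_consistent() {
        using isc::dns::RRType;
        // "NULL" in text and RRType::Null() both denote type code 10.
        return RRType("NULL").getCode() == RRType::Null().getCode();
    }
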
diff --git a/src/lib/dns/tests/zone_checker_unittest.cc b/src/lib/dns/tests/zone_checker_unittest.cc
index dbe204d..320cda6 100644
--- a/src/lib/dns/tests/zone_checker_unittest.cc
+++ b/src/lib/dns/tests/zone_checker_unittest.cc
@@ -160,7 +160,7 @@ TEST_F(ZoneCheckerTest, checkSOA) {
// Likewise, if the SOA RRset contains non SOA Rdata, it should be a bug.
rrsets_->removeRRset(zname_, zclass_, RRType::SOA());
soa_.reset(new RRset(zname_, zclass_, RRType::SOA(), RRTTL(60)));
- soa_->addRdata(createRdata(RRType::NS(), zclass_, "ns.example.com"));
+ soa_->addRdata(createRdata(RRType::NS(), zclass_, "ns.example.com."));
rrsets_->addRRset(soa_);
EXPECT_THROW(checkZone(zname_, zclass_, *rrsets_, callbacks_), Unexpected);
checkIssues(); // no error/warning should be reported
@@ -218,7 +218,7 @@ TEST_F(ZoneCheckerTest, checkNSData) {
// If there's a CNAME at the name instead, it's an error.
rrsets_->removeRRset(Name("*.example.com"), zclass_, RRType::A());
RRsetPtr cname(new RRset(ns_name, zclass_, RRType::CNAME(), RRTTL(60)));
- cname->addRdata(generic::CNAME("cname.example.com"));
+ cname->addRdata(generic::CNAME("cname.example.com."));
rrsets_->addRRset(cname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.example.com' is "
@@ -245,7 +245,7 @@ TEST_F(ZoneCheckerTest, checkNSData) {
rrsets_->removeRRset(ns_name, zclass_, RRType::CNAME());
rrsets_->removeRRset(zname_, zclass_, RRType::NS());
ns_.reset(new RRset(zname_, zclass_, RRType::NS(), RRTTL(60)));
- ns_->addRdata(generic::NS("ns.example.org"));
+ ns_->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(ns_);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -274,7 +274,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
rrsets_->addRRset(ns_);
RRsetPtr child_ns(new RRset(Name("child.example.com"), zclass_,
RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -282,7 +282,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
// Zone cut at the NS name. Same result.
rrsets_->removeRRset(child_ns->getName(), zclass_, RRType::NS());
child_ns.reset(new RRset(ns_name, zclass_, RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -291,7 +291,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDelegation) {
rrsets_->removeRRset(child_ns->getName(), zclass_, RRType::NS());
child_ns.reset(new RRset(Name("another.ns.child.example.com"), zclass_,
RRType::NS(), RRTTL(60)));
- child_ns->addRdata(generic::NS("ns.example.org"));
+ child_ns->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(child_ns);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_warns_.push_back("zone example.com/IN: NS has no address");
@@ -309,7 +309,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
ns_->addRdata(generic::NS(ns_name));
rrsets_->addRRset(ns_);
RRsetPtr dname(new RRset(zname_, zclass_, RRType::DNAME(), RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.child.example.com'"
@@ -320,7 +320,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
rrsets_->removeRRset(zname_, zclass_, RRType::DNAME());
dname.reset(new RRset(Name("child.example.com"), zclass_, RRType::DNAME(),
RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_FALSE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_errors_.push_back("zone example.com/IN: NS 'ns.child.example.com'"
@@ -332,7 +332,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
// this implementation prefers the NS and skips further checks.
ns_.reset(new RRset(Name("child.example.com"), zclass_, RRType::NS(),
RRTTL(60)));
- ns_->addRdata(generic::NS("ns.example.org"));
+ ns_->addRdata(generic::NS("ns.example.org."));
rrsets_->addRRset(ns_);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
checkIssues();
@@ -342,7 +342,7 @@ TEST_F(ZoneCheckerTest, checkNSWithDNAME) {
rrsets_->removeRRset(dname->getName(), zclass_, RRType::DNAME());
rrsets_->removeRRset(ns_->getName(), zclass_, RRType::NS());
dname.reset(new RRset(ns_name, zclass_, RRType::DNAME(), RRTTL(60)));
- dname->addRdata(generic::DNAME("example.org"));
+ dname->addRdata(generic::DNAME("example.org."));
rrsets_->addRRset(dname);
EXPECT_TRUE(checkZone(zname_, zclass_, *rrsets_, callbacks_));
expected_warns_.push_back("zone example.com/IN: NS has no address");
diff --git a/src/lib/nsas/tests/nameserver_address_store_unittest.cc b/src/lib/nsas/tests/nameserver_address_store_unittest.cc
index 6ddae72..ceb5775 100644
--- a/src/lib/nsas/tests/nameserver_address_store_unittest.cc
+++ b/src/lib/nsas/tests/nameserver_address_store_unittest.cc
@@ -386,7 +386,7 @@ TEST_F(NameserverAddressStoreTest, CombinedTest) {
// But we do not answer it right away. We create a new zone and
// let this nameserver entry get out.
- rrns_->addRdata(rdata::generic::NS("example.cz"));
+ rrns_->addRdata(rdata::generic::NS("example.cz."));
nsas.lookupAndAnswer(EXAMPLE_CO_UK, RRClass::IN(), rrns_, getCallback());
// It really should ask something, one of the nameservers
diff --git a/src/lib/nsas/tests/nsas_test.h b/src/lib/nsas/tests/nsas_test.h
index d6b4d92..9f92149 100644
--- a/src/lib/nsas/tests/nsas_test.h
+++ b/src/lib/nsas/tests/nsas_test.h
@@ -264,8 +264,8 @@ protected:
rrch_->addRdata(ConstRdataPtr(new RdataTest<A>("1324")));
// NS records take a single name
- rrns_->addRdata(rdata::generic::NS("example.fr"));
- rrns_->addRdata(rdata::generic::NS("example.de"));
+ rrns_->addRdata(rdata::generic::NS("example.fr."));
+ rrns_->addRdata(rdata::generic::NS("example.de."));
// Single NS record with 0 TTL
rr_single_->addRdata(rdata::generic::NS(ns_name_));
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 6db64e2..9cd8d66 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -43,8 +43,8 @@ def reload():
# the system.
# PLUGIN_PATHS: configuration modules that are not associated to specific
# process
- # LIBEXECPATH: Paths to programs invoked by the boss process
- # The boss process (directly or via a helper module) uses this as
+ # LIBEXECPATH: Paths to programs invoked by the b10-init process
+ # The b10-init process (directly or via a helper module) uses this as
# the prefererred PATH before starting a child process.
# When "FROM_SOURCE", it lists the directories where the programs are
# built so that when BIND 10 is experimentally started on the source
@@ -53,7 +53,7 @@ def reload():
# B10_FROM_SOURCE_LOCALSTATEDIR is specifically intended to be used for
# tests where we want to use various types of configuration within the test
# environment. (We may want to make it even more generic so that the path
- # is passed from the boss process)
+ # is passed from the b10-init process)
if "B10_FROM_SOURCE" in os.environ:
if "B10_FROM_SOURCE_LOCALSTATEDIR" in os.environ:
DATA_PATH = os.environ["B10_FROM_SOURCE_LOCALSTATEDIR"]
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 029f110..37138a2 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,7 +1,3 @@
-# On some systems, it appears the dynamic linker gets
-# confused if the order is not right here
-# There is probably a solution for this, but for now:
-# order is important here!
-import isc.cc
-import isc.config
-import isc.datasrc
+"""
+This is the top directory for common BIND 10 Python modules and packages.
+"""
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index aa5d0ab..8d2f179 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -3,3 +3,8 @@ SUBDIRS = . tests
python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
socket_cache.py
pythondir = $(pyexecdir)/isc/bind10
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index febeb10..2efb376 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -28,12 +28,12 @@ configuration). This is yet to be designed.
"""
import isc.log
-from isc.log_messages.bind10_messages import *
+from isc.log_messages.init_messages import *
import time
import os
import signal
-logger = isc.log.Logger("boss")
+logger = isc.log.Logger("init")
DBG_TRACE_DATA = 20
DBG_TRACE_DETAILED = 80
@@ -96,13 +96,13 @@ class BaseComponent:
that is already shutting down, impossible to stop, etc. We need to add more
states in future to handle it properly.
"""
- def __init__(self, boss, kind):
+ def __init__(self, b10_init, kind):
"""
Creates the component in not running mode.
The parameters are:
- - `boss` the boss object to plug into. The component needs to plug
- into it to know when it failed, etc.
+ - `b10_init` the b10_init object to plug into. The component needs
+ to plug into it to know when it failed, etc.
- `kind` is the kind of component. It may be one of:
* 'core' means the system can't run without it and it can't be
safely restarted. If it does not start, the system is brought
@@ -127,7 +127,7 @@ class BaseComponent:
Note that the __init__ method of child class should have these
parameters:
- __init__(self, process, boss, kind, address=None, params=None)
+ __init__(self, process, b10_init, kind, address=None, params=None)
The extra parameters are:
- `process` - which program should be started.
@@ -153,7 +153,7 @@ class BaseComponent:
raise ValueError('Component kind can not be ' + kind)
self.__state = STATE_STOPPED
self._kind = kind
- self._boss = boss
+ self._b10_init = b10_init
self._original_start_time = None
def start(self):
@@ -204,13 +204,14 @@ class BaseComponent:
def failed(self, exit_code):
"""
- Notify the component it crashed. This will be called from boss object.
+ Notify the component it crashed. This will be called from b10_init
+ object.
If you try to call failed on a component that is not running,
a ValueError is raised.
If it is a core component or needed component and it was started only
- recently, the component will become dead and will ask the boss to shut
+ recently, the component will become dead and will ask b10_init to shut
down with error exit status. A dead component can't be started again.
Otherwise the component will try to restart.
@@ -253,7 +254,7 @@ class BaseComponent:
self._original_start_time):
self.__state = STATE_DEAD
logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
- self._boss.component_shutdown(1)
+ self._b10_init.component_shutdown(1)
return False
# This means we want to restart
else:
@@ -326,7 +327,7 @@ class BaseComponent:
should be registered).
You should register all the processes created by calling
- self._boss.register_process.
+ self._b10_init.register_process.
"""
pass
@@ -407,15 +408,15 @@ class Component(BaseComponent):
directly. It is not recommended to override methods of this class
on one-by-one basis.
"""
- def __init__(self, process, boss, kind, address=None, params=None,
+ def __init__(self, process, b10_init, kind, address=None, params=None,
start_func=None):
"""
Creates the component in not running mode.
The parameters are:
- `process` is the name of the process to start.
- - `boss` the boss object to plug into. The component needs to plug
- into it to know when it failed, etc.
+ - `b10_init` the b10-init object to plug into. The component needs to
+ plug into it to know when it failed, etc.
- `kind` is the kind of component. Refer to the documentation of
BaseComponent for details.
- `address` is the address on message bus. It is used to ask it to
@@ -429,7 +430,7 @@ class Component(BaseComponent):
There's a sensible default if not provided, which just launches
the program without any special care.
"""
- BaseComponent.__init__(self, boss, kind)
+ BaseComponent.__init__(self, b10_init, kind)
self._process = process
self._start_func = start_func
self._address = address
@@ -443,25 +444,26 @@ class Component(BaseComponent):
process and return the procinfo object describing the running process.
If you don't provide the _start_func, the usual startup by calling
- boss.start_simple is performed.
+ b10_init.start_simple is performed.
"""
# This one is not tested. For one, it starts a real process
# which is out of scope of unit tests, for another, it just
- # delegates the starting to other function in boss (if a derived
+ # delegates the starting to other function in b10_init (if a derived
# class does not provide an override function), which is tested
# by use.
if self._start_func is not None:
procinfo = self._start_func()
else:
# TODO Handle params, etc
- procinfo = self._boss.start_simple(self._process)
+ procinfo = self._b10_init.start_simple(self._process)
self._procinfo = procinfo
- self._boss.register_process(self.pid(), self)
+ self._b10_init.register_process(self.pid(), self)
def _stop_internal(self):
- self._boss.stop_process(self._process, self._address, self.pid())
+ self._b10_init.stop_process(self._process, self._address, self.pid())
# TODO Some way to wait for the process that doesn't want to
- # terminate and kill it would prove nice (or add it to boss somewhere?)
+ # terminate and kill it would prove nice (or add it to b10_init
+ # somewhere?)
def name(self):
"""
@@ -498,7 +500,7 @@ class Configurator:
b10-auth as core, it is safe to stop that one.
The parameters are:
- * `boss`: The boss we are managing for.
+ * `b10_init`: The b10-init we are managing for.
* `specials`: Dict of specially started components. Each item is a class
representing the component.
@@ -527,13 +529,14 @@ class Configurator:
priority are started before the ones with lower priority. If it is
not present, it defaults to 0.
"""
- def __init__(self, boss, specials = {}):
+ def __init__(self, b10_init, specials = {}):
"""
Initializes the configurator, but nothing is started yet.
- The boss parameter is the boss object used to start and stop processes.
+ The b10_init parameter is the b10-init object used to start and stop
+ processes.
"""
- self.__boss = boss
+ self.__b10_init = b10_init
# These could be __private, but as we access them from within unittest,
# it's more comfortable to have them just _protected.
@@ -551,7 +554,7 @@ class Configurator:
def startup(self, configuration):
"""
Starts the first set of processes. This configuration is expected
- to be hardcoded from the boss itself to start the configuration
+ to be hardcoded from the b10-init itself to start the configuration
manager and other similar things.
"""
if self._running:
@@ -642,7 +645,7 @@ class Configurator:
# TODO: Better error handling
creator = self.__specials[component_config['special']]
component = creator(component_config.get('process', cname),
- self.__boss, component_config['kind'],
+ self.__b10_init, component_config['kind'],
component_config.get('address'),
component_config.get('params'))
priority = component_config.get('priority', 0)
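
The failed() documentation above amounts to a small decision rule: a core or needed component that dies shortly after it was started makes b10-init give up (the component becomes dead and component_shutdown(1) is called), anything else is scheduled for a restart. A rough sketch of that decision, written in C++ for consistency with the earlier examples; the grace period value is an assumption, not taken from this diff:

    #include <string>

    // Model of BaseComponent.failed() as documented above.
    bool should_restart(const std::string& kind, double seconds_since_start) {
        const bool critical = (kind == "core" || kind == "needed");
        const double START_GRACE_SECONDS = 10.0;   // assumed threshold
        if (critical && seconds_since_start < START_GRACE_SECONDS) {
            return false;   // give up: STATE_DEAD + component_shutdown(1)
        }
        return true;        // otherwise try to restart the component
    }
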
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
index 593d1a6..db9e6c5 100644
--- a/src/lib/python/isc/bind10/sockcreator.py
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -20,10 +20,10 @@ import errno
import copy
import subprocess
import copy
-from isc.log_messages.bind10_messages import *
+from isc.log_messages.init_messages import *
from libutil_io_python import recv_fd
-logger = isc.log.Logger("boss")
+logger = isc.log.Logger("init")
"""
Module that comunicates with the privileged socket creator (b10-sockcreator).
@@ -251,7 +251,7 @@ class Creator(Parser):
"""Function used before running a program that needs to run as a
different user."""
# Put us into a separate process group so we don't get
- # SIGINT signals on Ctrl-C (the boss will shut everthing down by
+ # SIGINT signals on Ctrl-C (b10-init will shut everything down by
# other means).
os.setpgrp()
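
The comment above is the whole reason for the os.setpgrp() call: the privileged socket creator must not receive the SIGINT that Ctrl-C sends to the foreground process group; b10-init stops it explicitly instead. The equivalent pre-exec hook in C would be:

    #include <unistd.h>

    /* Move the child into its own process group before exec so that a
     * Ctrl-C aimed at the foreground group does not reach it. */
    void detach_from_terminal_group(void) {
        setpgid(0, 0);   /* same effect as os.setpgrp() above */
    }
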
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
index d6c1175..1c5199c 100644
--- a/src/lib/python/isc/bind10/socket_cache.py
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -106,7 +106,8 @@ class Cache:
This is the cache for sockets from socket creator. The purpose of cache
is to hold the sockets that were requested, until they are no longer
needed. One reason is, the socket is created before it is sent over the
- unix domain socket in boss, so we need to keep it somewhere for a while.
+ unix domain socket in b10-init, so we need to keep it somewhere for a
+ while.
The other reason is, a single socket might be requested multiple times.
So we keep it here in case someone else might ask for it.
@@ -114,7 +115,7 @@ class Cache:
Each socket kept here has a reference count and when it drops to zero,
it is removed from cache and closed.
- This is expected to be part of Boss, it is not a general utility class.
+ This is expected to be part of Init, it is not a general utility class.
It is not expected to be subclassed. The methods and members are named
as protected so tests are easier access into them.
@@ -175,7 +176,7 @@ class Cache:
restrictions and of all copies of socket handed out are considered,
so it can be raised even if you call it with share_mode 'ANY').
- isc.bind10.sockcreator.CreatorError: fatal creator errors are
+ propagated. They should cause b10-init to exit if ever encountered.
+ propagated. Thay should cause b10-init to exit if ever encountered.
Note that it isn't guaranteed the tokens would be unique and they
should be used as an opaque handle only.
@@ -220,11 +221,11 @@ class Cache:
one returned from previous call from get_token. The token can be used
only once to receive the socket.
- The application is a token representing the application that requested
- it. Currently, boss uses the file descriptor of connection from the
- application, but anything which can be a key in a dict is OK from the
- cache's point of view. You just need to use the same thing in
- drop_application.
+ The application is a token representing the application that
+ requested it. Currently, b10-init uses the file descriptor of
+ connection from the application, but anything which can be a key in
+ a dict is OK from the cache's point of view. You just need to use
+ the same thing in drop_application.
In case the token is considered invalid (it doesn't come from the
get_token, it was already used, the socket wasn't picked up soon
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index dcd9b64..3196795 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -26,23 +26,23 @@ class SockCreator(BaseComponent):
Note: _creator shouldn't be reset explicitly once created. The
underlying Popen object would then wait() the child process internally,
- which breaks the assumption of the boss, who is expecting to see
+ which breaks the assumption of b10-init, which is expecting to see
the process die in waitpid().
"""
- def __init__(self, process, boss, kind, address=None, params=None):
- BaseComponent.__init__(self, boss, kind)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ BaseComponent.__init__(self, b10_init, kind)
self.__creator = None
def _start_internal(self):
- self._boss.curproc = 'b10-sockcreator'
+ self._b10_init.curproc = 'b10-sockcreator'
self.__creator = isc.bind10.sockcreator.Creator(LIBEXECPATH + ':' +
os.environ['PATH'])
- self._boss.register_process(self.pid(), self)
- self._boss.set_creator(self.__creator)
- self._boss.log_started(self.pid())
+ self._b10_init.register_process(self.pid(), self)
+ self._b10_init.set_creator(self.__creator)
+ self._b10_init.log_started(self.pid())
# We are now ready for switching user.
- self._boss.change_user()
+ self._b10_init.change_user()
def _stop_internal(self):
self.__creator.terminate()
@@ -64,12 +64,12 @@ class SockCreator(BaseComponent):
class Msgq(Component):
"""
- The message queue. Starting is passed to boss, stopping is not supported
- and we leave the boss kill it by signal.
+ The message queue. Starting is passed to b10-init, stopping is not
+ supported and we let b10-init kill it by signal.
"""
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, None, None,
- boss.start_msgq)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, None, None,
+ b10_init.start_msgq)
def _stop_internal(self):
"""
@@ -78,7 +78,7 @@ class Msgq(Component):
But as it is stateless, it's OK to kill it.
So we disable this method (as the only time it could be called is
- during shutdown) and wait for the boss to kill it in the next shutdown
+ during shutdown) and wait for b10-init to kill it in the next shutdown
step.
This actually breaks the recommendation at Component we shouldn't
@@ -89,24 +89,24 @@ class Msgq(Component):
pass
class CfgMgr(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'ConfigManager',
- None, boss.start_cfgmgr)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'ConfigManager',
+ None, b10_init.start_cfgmgr)
class Auth(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Auth', None,
- boss.start_auth)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Auth', None,
+ b10_init.start_auth)
class Resolver(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Resolver', None,
- boss.start_resolver)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Resolver', None,
+ b10_init.start_resolver)
class CmdCtl(Component):
- def __init__(self, process, boss, kind, address=None, params=None):
- Component.__init__(self, process, boss, kind, 'Cmdctl', None,
- boss.start_cmdctl)
+ def __init__(self, process, b10_init, kind, address=None, params=None):
+ Component.__init__(self, process, b10_init, kind, 'Cmdctl', None,
+ b10_init.start_cmdctl)
def get_specials():
"""
List of specially started components. Each one should be the class than can
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 8603201..adc035e 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -31,9 +31,9 @@ class TestError(Exception):
"""
pass
-class BossUtils:
+class InitUtils:
"""
- A class that brings some utilities for pretending we're Boss.
+ A class that brings some utilities for pretending we're Init.
This is expected to be inherited by the testcases themselves.
"""
def setUp(self):
@@ -70,7 +70,7 @@ class BossUtils:
isc.bind10.component.time.time = lambda: tm + 30
# Few functions that pretend to start something. Part of pretending of
- # being boss.
+ # being b10-init.
def start_msgq(self):
pass
@@ -86,7 +86,7 @@ class BossUtils:
def start_cmdctl(self):
pass
-class ComponentTests(BossUtils, unittest.TestCase):
+class ComponentTests(InitUtils, unittest.TestCase):
"""
Tests for the bind10.component.Component class
"""
@@ -94,7 +94,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
"""
Pretend a newly started system.
"""
- BossUtils.setUp(self)
+ InitUtils.setUp(self)
self._shutdown = False
self._exitcode = None
self.__start_called = False
@@ -103,7 +103,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
self.__registered_processes = {}
self.__stop_process_params = None
self.__start_simple_params = None
- # Pretending to be boss
+ # Pretending to be b10-init
self.__change_user_called = False
def change_user(self):
@@ -149,7 +149,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
its behaviour.
The process used is some nonsense, as this isn't used in this
- kind of tests and we pretend to be the boss.
+ kind of tests and we pretend to be the b10-init.
"""
component = Component('No process', self, kind, 'homeless', [])
component._start_internal = self.__start
@@ -176,7 +176,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
Test the correct data are stored inside the component.
"""
component = self.__create_component('core')
- self.assertEqual(self, component._boss)
+ self.assertEqual(self, component._b10_init)
self.assertEqual("No process", component._process)
self.assertEqual(None, component._start_func)
self.assertEqual("homeless", component._address)
@@ -539,7 +539,7 @@ class ComponentTests(BossUtils, unittest.TestCase):
def register_process(self, pid, process):
"""
- Part of pretending to be a boss
+ Part of pretending to be a b10-init
"""
self.__registered_processes[pid] = process
@@ -570,13 +570,13 @@ class ComponentTests(BossUtils, unittest.TestCase):
def stop_process(self, process, address, pid):
"""
- Part of pretending to be boss.
+ Part of pretending to be b10-init.
"""
self.__stop_process_params = (process, address, pid)
def start_simple(self, process):
"""
- Part of pretending to be boss.
+ Part of pretending to be b10-init.
"""
self.__start_simple_params = process
@@ -632,14 +632,14 @@ class ComponentTests(BossUtils, unittest.TestCase):
def set_creator(self, creator):
"""
- Part of faking being the boss. Check the creator (faked as well)
+ Part of faking being the b10-init. Check the creator (faked as well)
is passed here.
"""
self.assertTrue(isinstance(creator, self.FakeCreator))
def log_started(self, pid):
"""
- Part of faking the boss. Check the pid is the one of the fake creator.
+ Part of faking the b10-init. Check the pid is that of the fake creator.
"""
self.assertEqual(42, pid)
@@ -706,13 +706,13 @@ class FailComponent(BaseComponent):
"""
A mock component that fails whenever it is started.
"""
- def __init__(self, name, boss, kind, address=None, params=None):
- BaseComponent.__init__(self, boss, kind)
+ def __init__(self, name, b10_init, kind, address=None, params=None):
+ BaseComponent.__init__(self, b10_init, kind)
def _start_internal(self):
raise TestError("test error")
-class ConfiguratorTest(BossUtils, unittest.TestCase):
+class ConfiguratorTest(InitUtils, unittest.TestCase):
"""
Tests for the configurator.
"""
@@ -720,7 +720,7 @@ class ConfiguratorTest(BossUtils, unittest.TestCase):
"""
Prepare some test data for the tests.
"""
- BossUtils.setUp(self)
+ InitUtils.setUp(self)
self.log = []
# The core "hardcoded" configuration
self.__core = {
@@ -755,11 +755,12 @@ class ConfiguratorTest(BossUtils, unittest.TestCase):
self.__core_log = self.__core_log_create + self.__core_log_start
self.__specials = { 'test': self.__component_test }
- def __component_test(self, process, boss, kind, address=None, params=None):
+ def __component_test(self, process, b10_init, kind, address=None,
+ params=None):
"""
Create a test component. It will log events to us.
"""
- self.assertEqual(self, boss)
+ self.assertEqual(self, b10_init)
return TestComponent(self, process, kind, address, params)
def test_init(self):
diff --git a/src/lib/python/isc/cc/Makefile.am b/src/lib/python/isc/cc/Makefile.am
index b0ba3b2..ba6fe50 100644
--- a/src/lib/python/isc/cc/Makefile.am
+++ b/src/lib/python/isc/cc/Makefile.am
@@ -1,6 +1,19 @@
SUBDIRS = . tests
-python_PYTHON = __init__.py data.py session.py message.py
+python_PYTHON = __init__.py data.py session.py message.py logger.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.pyc
+
+EXTRA_DIST = pycc_messages.mes
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/pycc_messages.py: pycc_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/pycc_messages.mes
pythondir = $(pyexecdir)/isc/cc
diff --git a/src/lib/python/isc/cc/logger.py b/src/lib/python/isc/cc/logger.py
new file mode 100644
index 0000000..5fd440f
--- /dev/null
+++ b/src/lib/python/isc/cc/logger.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2013 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+""" This is a logging utility module for other modules of the cc library
+package.
+
+"""
+
+import isc.log
+
+# C++ version of the CC module uses 'cc'; using the same name does not
+# necessarily cause disruption, but we use a different name to minimize
+# possible confusion.
+logger = isc.log.Logger('pycc')
diff --git a/src/lib/python/isc/cc/pycc_messages.mes b/src/lib/python/isc/cc/pycc_messages.mes
new file mode 100644
index 0000000..997b899
--- /dev/null
+++ b/src/lib/python/isc/cc/pycc_messages.mes
@@ -0,0 +1,20 @@
+# Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the pycc_messages python module.
+
+% PYCC_LNAME_RECEIVED received local name: %1
+Debug message: the local module received its unique identifier (name)
+from msgq on completion of establishing the session with msgq.
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index 33a47bd..caac553 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -22,6 +22,9 @@ import threading
import bind10_config
import isc.cc.message
+import isc.log
+from isc.cc.logger import logger
+from isc.log_messages.pycc_messages import *
class ProtocolError(Exception): pass
class NetworkError(Exception): pass
@@ -60,6 +63,8 @@ class Session:
self._lname = msg["lname"]
if not self._lname:
raise ProtocolError("Could not get local name")
+ logger.debug(logger.DBGLVL_TRACE_BASIC, PYCC_LNAME_RECEIVED,
+ self._lname)
except socket.error as se:
raise SessionError(se)
diff --git a/src/lib/python/isc/cc/tests/message_test.py b/src/lib/python/isc/cc/tests/message_test.py
index c417068..17f034d 100644
--- a/src/lib/python/isc/cc/tests/message_test.py
+++ b/src/lib/python/isc/cc/tests/message_test.py
@@ -27,20 +27,27 @@ class MessageTest(unittest.TestCase):
self.msg1_str = "{\"just\": [\"an\", \"arbitrary\", \"structure\"]}";
self.msg1_wire = self.msg1_str.encode()
- self.msg2 = { "aaa": [ 1, 1.1, True, False, None ] }
- self.msg2_str = "{\"aaa\": [1, 1.1, true, false, null]}";
+ self.msg2 = { "aaa": [ 1, True, False, None ] }
+ self.msg2_str = "{\"aaa\": [1, true, false, null]}";
self.msg2_wire = self.msg2_str.encode()
self.msg3 = { "aaa": [ 1, 1.1, True, False, "string\n" ] }
self.msg3_str = "{\"aaa\": [1, 1.1, true, false, \"string\n\" ]}";
self.msg3_wire = self.msg3_str.encode()
+ # Due to the inherent impreciseness of floating point values,
+ # we test this one separately (with AlmostEqual)
+ self.msg_float = 1.1
+ self.msg_float_str = "1.1";
+ self.msg_float_wire = self.msg_float_str.encode()
+
def test_encode_json(self):
self.assertEqual(self.msg1_wire, isc.cc.message.to_wire(self.msg1))
self.assertEqual(self.msg2_wire, isc.cc.message.to_wire(self.msg2))
-
+ self.assertAlmostEqual(float(self.msg_float_wire),
+ float(isc.cc.message.to_wire(self.msg_float)))
self.assertRaises(TypeError, isc.cc.message.to_wire, NotImplemented)
-
+
def test_decode_json(self):
self.assertEqual(self.msg1, isc.cc.message.from_wire(self.msg1_wire))
self.assertEqual(self.msg2, isc.cc.message.from_wire(self.msg2_wire))
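
The comment introducing msg_float above gives the reason for the split: a float may not survive the trip through JSON text byte-for-byte, so it is compared with a tolerance (assertAlmostEqual) rather than for exact equality. The same idea as a small C++ helper, illustrative only:

    #include <algorithm>
    #include <cmath>

    // Compare two doubles with a relative tolerance instead of operator==.
    bool almost_equal(double a, double b, double rel_eps = 1e-9) {
        return std::fabs(a - b) <= rel_eps * std::max(std::fabs(a), std::fabs(b));
    }
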
diff --git a/src/lib/python/isc/cc/tests/session_test.py b/src/lib/python/isc/cc/tests/session_test.py
index e589085..e8656e7 100644
--- a/src/lib/python/isc/cc/tests/session_test.py
+++ b/src/lib/python/isc/cc/tests/session_test.py
@@ -19,6 +19,7 @@
import unittest
import os
+import json
from isc.cc.session import *
# our fake socket, where we can read and insert messages
@@ -73,8 +74,23 @@ class MySocket():
result.extend(self.readsent(header_length))
result.extend(self.readsent(data_length))
+
return result
+ def readsentmsg_parsed(self):
+ length_buf = self.readsent(4)
+ length = struct.unpack('>I', length_buf)[0]
+ header_length_buf = self.readsent(2)
+ header_length = struct.unpack('>H', header_length_buf)[0]
+ data_length = length - 2 - header_length
+
+ env = json.loads(self.readsent(header_length).decode('utf-8'), strict=False)
+ if (data_length > 0):
+ msg = json.loads(self.readsent(data_length).decode('utf-8'), strict=False)
+ else:
+ msg = {}
+ return (env, msg)
+
def recv(self, length):
if len(self.recvqueue) == 0:
if self._blocking:
@@ -208,25 +224,25 @@ class testSession(unittest.TestCase):
# 'malformed' messages
# shouldn't some of these raise exceptions?
- #self.recv_and_compare(sess,
+ #self.recv_and_compare(sess,
# b'\x00',
# None, None)
- #self.recv_and_compare(sess,
+ #self.recv_and_compare(sess,
# b'\x00\x00\x00\x10',
# None, None)
- #self.recv_and_compare(sess,
+ #self.recv_and_compare(sess,
# b'\x00\x00\x00\x02\x00\x00',
# None, None)
- #self.recv_and_compare(sess,
+ #self.recv_and_compare(sess,
# b'\x00\x00\x00\x02\x00\x02',
# None, None)
- #self.recv_and_compare(sess,
+ #self.recv_and_compare(sess,
# b'',
# None, None)
# need to clear
sess._socket.recvqueue = bytearray()
-
+
# 'queueing' system
# sending message {'to': 'someone', 'reply': 1}, {"hello": "a"}
#print("sending message {'to': 'someone', 'reply': 1}, {'hello': 'a'}")
@@ -240,7 +256,7 @@ class testSession(unittest.TestCase):
self.assertEqual({'to': 'someone', 'reply': 1}, env)
self.assertEqual({"hello": "a"}, msg)
self.assertFalse(sess.has_queued_msgs())
-
+
# ask for a differe sequence number reply (that doesn't exist)
# then ask for the one that is there
self.assertFalse(sess.has_queued_msgs())
@@ -253,7 +269,7 @@ class testSession(unittest.TestCase):
self.assertEqual({'to': 'someone', 'reply': 1}, env)
self.assertEqual({"hello": "a"}, msg)
self.assertFalse(sess.has_queued_msgs())
-
+
# ask for a differe sequence number reply (that doesn't exist)
# then ask for any message
self.assertFalse(sess.has_queued_msgs())
@@ -266,7 +282,7 @@ class testSession(unittest.TestCase):
self.assertEqual({'to': 'someone', 'reply': 1}, env)
self.assertEqual({"hello": "a"}, msg)
self.assertFalse(sess.has_queued_msgs())
-
+
#print("sending message {'to': 'someone', 'reply': 1}, {'hello': 'a'}")
# ask for a differe sequence number reply (that doesn't exist)
@@ -287,7 +303,7 @@ class testSession(unittest.TestCase):
self.assertEqual({'to': 'someone'}, env)
self.assertEqual({"hello": "b"}, msg)
self.assertFalse(sess.has_queued_msgs())
-
+
# send a message, then one with specific reply value
# ask for that specific message (get the second)
# then ask for any message (get the first)
@@ -326,48 +342,56 @@ class testSession(unittest.TestCase):
def test_group_subscribe(self):
sess = MySession()
sess.group_subscribe("mygroup")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00<\x00:{"group": "mygroup", "type": "subscribe", "instance": "*"}')
-
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "subscribe",
+ "instance": "*"}, {}))
+
sess.group_subscribe("mygroup")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00<\x00:{"group": "mygroup", "type": "subscribe", "instance": "*"}')
-
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "subscribe",
+ "instance": "*"}, {}))
+
sess.group_subscribe("mygroup", "my_instance")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00F\x00D{"group": "mygroup", "type": "subscribe", "instance": "my_instance"}')
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "subscribe",
+ "instance": "my_instance"}, {}))
def test_group_unsubscribe(self):
sess = MySession()
sess.group_unsubscribe("mygroup")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00>\x00<{"group": "mygroup", "type": "unsubscribe", "instance": "*"}')
-
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "unsubscribe",
+ "instance": "*"}, {}))
+
sess.group_unsubscribe("mygroup")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00>\x00<{"group": "mygroup", "type": "unsubscribe", "instance": "*"}')
-
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "unsubscribe",
+ "instance": "*"}, {}))
+
sess.group_unsubscribe("mygroup", "my_instance")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00H\x00F{"group": "mygroup", "type": "unsubscribe", "instance": "my_instance"}')
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"group": "mygroup", "type": "unsubscribe",
+ "instance": "my_instance"}, {}))
def test_group_sendmsg(self):
sess = MySession()
self.assertEqual(sess._sequence, 1)
sess.group_sendmsg({ 'hello': 'a' }, "my_group")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00p\x00`{"from": "test_name", "seq": 2, "to": "*", "instance": "*", "group": "my_group", "type": "send"}{"hello": "a"}')
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"from": "test_name", "seq": 2, "to": "*",
+ "instance": "*", "group": "my_group",
+ "type": "send"}, {"hello": "a"}))
self.assertEqual(sess._sequence, 2)
sess.group_sendmsg({ 'hello': 'a' }, "my_group", "my_instance")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00z\x00j{"from": "test_name", "seq": 3, "to": "*", "instance": "my_instance", "group": "my_group", "type": "send"}{"hello": "a"}')
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"from": "test_name", "seq": 3, "to": "*", "instance": "my_instance", "group": "my_group", "type": "send"}, {"hello": "a"}))
self.assertEqual(sess._sequence, 3)
-
+
sess.group_sendmsg({ 'hello': 'a' }, "your_group", "your_instance")
- sent = sess._socket.readsentmsg()
- self.assertEqual(sent, b'\x00\x00\x00~\x00n{"from": "test_name", "seq": 4, "to": "*", "instance": "your_instance", "group": "your_group", "type": "send"}{"hello": "a"}')
+ sent = sess._socket.readsentmsg_parsed()
+ self.assertEqual(sent, ({"from": "test_name", "seq": 4, "to": "*", "instance": "your_instance", "group": "your_group", "type": "send"}, {"hello": "a"}))
self.assertEqual(sess._sequence, 4)
def test_group_recvmsg(self):
@@ -377,13 +401,25 @@ class testSession(unittest.TestCase):
def test_group_reply(self):
sess = MySession()
- sess.group_reply({ 'from': 'me', 'group': 'our_group', 'instance': 'other_instance', 'seq': 4}, {"hello": "a"})
- sent = sess._socket.readsentmsg();
- self.assertEqual(sent, b'\x00\x00\x00\x8b\x00{{"from": "test_name", "seq": 2, "to": "me", "instance": "other_instance", "reply": 4, "group": "our_group", "type": "send"}{"hello": "a"}')
-
- sess.group_reply({ 'from': 'me', 'group': 'our_group', 'instance': 'other_instance', 'seq': 9}, {"hello": "a"})
- sent = sess._socket.readsentmsg();
- self.assertEqual(sent, b'\x00\x00\x00\x8b\x00{{"from": "test_name", "seq": 3, "to": "me", "instance": "other_instance", "reply": 9, "group": "our_group", "type": "send"}{"hello": "a"}')
+ sess.group_reply({ 'from': 'me', 'group': 'our_group',
+ 'instance': 'other_instance', 'seq': 4},
+ {"hello": "a"})
+ sent = sess._socket.readsentmsg_parsed();
+ self.assertEqual(sent, ({"from": "test_name", "seq": 2,
+ "to": "me", "instance": "other_instance",
+ "reply": 4, "group": "our_group",
+ "type": "send"},
+ {"hello": "a"}))
+
+ sess.group_reply({ 'from': 'me', 'group': 'our_group',
+ 'instance': 'other_instance', 'seq': 9},
+ {"hello": "a"})
+ sent = sess._socket.readsentmsg_parsed();
+ self.assertEqual(sent, ({"from": "test_name", "seq": 3,
+ "to": "me", "instance": "other_instance",
+ "reply": 9, "group": "our_group",
+ "type": "send"},
+ {"hello": "a"}))
def test_timeout(self):
if "BIND10_TEST_SOCKET_FILE" not in os.environ:
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 9563cab..bc24cbb 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -68,6 +68,62 @@ class ConfigManagerData:
self.db_filename = data_path + os.sep + file_name
self.data_path = data_path
+ def check_for_updates(file_config):
+ """
+ Given the parsed JSON data from the config file,
+ check whether it needs updating due to version changes.
+ Return the data with updates (or the original data if no
+ updates were necessary).
+ Even though it is at this moment not technically necessary, this
+ function makes and returns a copy of the given data.
+ """
+ config = copy.deepcopy(file_config)
+ if 'version' in config:
+ data_version = config['version']
+ else:
+ # If it is not present, assume latest or earliest?
+ data_version = 1
+
+ # For efficiency, if up-to-date, return now
+ if data_version == config_data.BIND10_CONFIG_DATA_VERSION:
+ return config
+
+ # Don't know what to do if it is more recent
+ if data_version > config_data.BIND10_CONFIG_DATA_VERSION:
+ raise ConfigManagerDataReadError(
+ "Cannot load configuration file: version "
+ "%d not yet supported" % config['version'])
+
+ # At some point we might give up supporting older versions
+ if data_version < 1:
+ raise ConfigManagerDataReadError(
+ "Cannot load configuration file: version "
+ "%d no longer supported" % config['version'])
+
+ # Ok, so we have a still-supported older version. Apply all
+ # updates
+ new_data_version = data_version
+ if new_data_version == 1:
+ # only format change, no other changes necessary
+ new_data_version = 2
+ if new_data_version == 2:
+ # 'Boss' got changed to 'Init'; If for some reason both are
+ # present, simply ignore the old one
+ if 'Boss' in config:
+ if not 'Init' in config:
+ config['Init'] = config['Boss']
+ del config['Boss']
+ else:
+ # This should not happen, but we don't want to overwrite
+ # any config in this case, so warn about it
+ logger.warn(CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND)
+ new_data_version = 3
+
+ config['version'] = new_data_version
+ logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, data_version,
+ new_data_version)
+ return config
+
def read_from_file(data_path, file_name):
"""Read the current configuration found in the file file_name.
If file_name is absolute, data_path is ignored. Otherwise
@@ -90,21 +146,7 @@ class ConfigManagerData:
# If possible, we automatically convert to the new
# scheme and update the configuration
# If not, we raise an exception
- if 'version' in file_config:
- if file_config['version'] == config_data.BIND10_CONFIG_DATA_VERSION:
- config.data = file_config
- elif file_config['version'] == 1:
- # only format change, no other changes necessary
- file_config['version'] = 2
- logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, 1, 2)
- config.data = file_config
- else:
- if config_data.BIND10_CONFIG_DATA_VERSION > file_config['version']:
- raise ConfigManagerDataReadError("Cannot load configuration file: version %d no longer supported" % file_config['version'])
- else:
- raise ConfigManagerDataReadError("Cannot load configuration file: version %d not yet supported" % file_config['version'])
- else:
- raise ConfigManagerDataReadError("No version information in configuration file " + config.db_filename)
+ config.data = ConfigManagerData.check_for_updates(file_config)
except IOError as ioe:
# if IOError is 'no such file or directory', then continue
# (raise empty), otherwise fail (raise error)
@@ -210,7 +252,7 @@ class ConfigManager:
else:
self.cc = isc.cc.Session()
self.cc.group_subscribe("ConfigManager")
- self.cc.group_subscribe("Boss", "ConfigManager")
+ self.cc.group_subscribe("Init", "ConfigManager")
self.running = False
# As a core module, CfgMgr is different than other modules,
# as it does not use a ModuleCCSession, and hence needs
@@ -232,10 +274,10 @@ class ConfigManager:
# handler, so make it use defaults (and flush any buffered logs)
ccsession.default_logconfig_handler({}, self.log_config_data)
- def notify_boss(self):
- """Notifies the Boss module that the Config Manager is running"""
+ def notify_b10_init(self):
+ """Notifies the Init module that the Config Manager is running"""
# TODO: Use a real, broadcast notification here.
- self.cc.group_sendmsg({"running": "ConfigManager"}, "Boss")
+ self.cc.group_sendmsg({"running": "ConfigManager"}, "Init")
def set_module_spec(self, spec):
"""Adds a ModuleSpec"""
@@ -551,7 +593,7 @@ class ConfigManager:
def run(self):
"""Runs the configuration manager."""
self.running = True
- while (self.running):
+ while self.running:
# we just wait eternally for any command here, so disable
# timeouts for this specific recv
self.cc.set_timeout(0)
@@ -566,3 +608,4 @@ class ConfigManager:
# Only respond if there actually is something to respond with
if answer is not None:
self.cc.group_reply(env, answer)
+ logger.info(CFGMGR_STOPPED_BY_COMMAND)
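For illustration, a minimal standalone sketch of the stepwise version migration introduced in check_for_updates() above (a missing version is treated as 1, newer versions are rejected, and the 2->3 step renames 'Boss' to 'Init' unless 'Init' already exists); the migrate_config/LATEST_VERSION names below are illustrative only and are not part of isc.config:

    # Standalone sketch of the stepwise config-version migration shown in
    # ConfigManagerData.check_for_updates() above; names and LATEST_VERSION
    # are illustrative, not the real isc.config module.
    import copy

    LATEST_VERSION = 3

    def migrate_config(file_config):
        config = copy.deepcopy(file_config)
        version = config.get('version', 1)   # missing version is treated as 1

        if version > LATEST_VERSION:
            raise ValueError("version %d not yet supported" % version)
        if version < 1:
            raise ValueError("version %d no longer supported" % version)

        if version == 1:
            # only a format change, nothing to rewrite
            version = 2
        if version == 2:
            # 'Boss' was renamed to 'Init'; leave both untouched if both exist
            if 'Boss' in config and 'Init' not in config:
                config['Init'] = config.pop('Boss')
            version = 3

        config['version'] = version
        return config

    print(migrate_config({'version': 2, 'Boss': {'some config': 1}}))
    # {'version': 3, 'Init': {'some config': 1}}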
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
index 8701db3..73b6cef 100644
--- a/src/lib/python/isc/config/cfgmgr_messages.mes
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -41,6 +41,16 @@ system. The most likely cause is that msgq is not running.
The configuration manager is starting, reading and saving the configuration
settings to the shown file.
+% CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND Configuration found for both 'Boss' and 'Init', ignoring 'Boss'
+In the process of updating the configuration from version 2 to version 3,
+the configuration manager has found that there are existing configurations
+for both the old value 'Boss' and the new value 'Init'. This should in
+theory not happen, as in older versions 'Init' does not exist, and in newer
+versions 'Boss' does not exist. The configuration manager will continue
+with the update process, leaving the values for both as they are, so as not
+to overwrite any settings. However, the values for 'Boss' are ignored by
+BIND 10, and it is probably wise to check the configuration file manually.
+
% CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1
There was a problem reading the persistent configuration data as stored
on disk. The file may be corrupted, or it is of a version from where
@@ -61,6 +71,9 @@ error is given. The most likely cause is that the system does not have
write access to the configuration database file. The updated
configuration is not stored.
+% CFGMGR_STOPPED_BY_COMMAND received shutdown command, shutting down
+The configuration manager received a shutdown command, and is exiting.
+
% CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cfgmgr daemon. The
daemon will now shut down.
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index ae61e2a..495d20b 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -27,7 +27,7 @@ import copy
class ConfigDataError(Exception): pass
-BIND10_CONFIG_DATA_VERSION = 2
+BIND10_CONFIG_DATA_VERSION = 3
# Helper functions
def spec_part_is_list(spec_part):
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 891a7d7..d99fb86 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -29,7 +29,7 @@ class TestConfigManagerData(unittest.TestCase):
self.writable_data_path = os.environ['CONFIG_WR_TESTDATA_PATH']
self.config_manager_data = ConfigManagerData(self.writable_data_path,
file_name="b10-config.db")
- self.assert_(self.config_manager_data)
+ self.assertTrue(self.config_manager_data)
def test_abs_file(self):
"""
@@ -49,6 +49,49 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(self.config_manager_data.db_filename,
self.writable_data_path + os.sep + "b10-config.db")
+ def test_check_for_updates_up_to_date(self):
+ # This should automatically give an up-to-date version
+ file_config = ConfigManagerData.read_from_file(
+ self.writable_data_path, "b10-config.db").data
+ updated_config = ConfigManagerData.check_for_updates(file_config)
+ self.assertEqual(file_config, updated_config)
+
+ def test_check_for_updates_from_1(self):
+ config = { "version": 1,
+ "foo": "bar",
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
+ def test_check_for_updates_from_2(self):
+ # No 'Boss' present, no change (except version)
+ config = { "version": 2,
+ "foo": "bar",
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
+ # With Boss, should be changed to 'Init'
+ config = { "version": 2,
+ "Boss": { "some config": 1 },
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config = { "version": config_data.BIND10_CONFIG_DATA_VERSION,
+ "Init": { "some config": 1 },
+ "something": [ 1, 2, 3 ] }
+ self.assertEqual(config, updated)
+
+ # With Boss AND Init, no change
+ config = { "version": 2,
+ "Boss": { "some config": 1 },
+ "Init": { "some other config": 1 },
+ "something": [ 1, 2, 3 ] }
+ updated = ConfigManagerData.check_for_updates(config)
+ config['version'] = config_data.BIND10_CONFIG_DATA_VERSION
+ self.assertEqual(config, updated)
+
def test_read_from_file(self):
ConfigManagerData.read_from_file(self.writable_data_path, "b10-config.db")
self.assertRaises(ConfigManagerDataEmpty,
@@ -170,65 +213,61 @@ class TestConfigManager(unittest.TestCase):
cm.config.db_filename)
def test_init(self):
- self.assert_(self.cm.module_specs == {})
- self.assert_(self.cm.data_path == self.writable_data_path)
- self.assert_(self.cm.config != None)
- self.assert_(self.fake_session.has_subscription("ConfigManager"))
- self.assert_(self.fake_session.has_subscription("Boss", "ConfigManager"))
+ self.assertEqual(self.cm.module_specs, {})
+ self.assertEqual(self.cm.data_path, self.writable_data_path)
+ self.assertIsNotNone(self.cm.config)
+ self.assertTrue(self.fake_session.has_subscription("ConfigManager"))
+ self.assertTrue(self.fake_session.has_subscription("Init", "ConfigManager"))
self.assertFalse(self.cm.running)
- def test_notify_boss(self):
- self.cm.notify_boss()
- msg = self.fake_session.get_message("Boss", None)
- self.assert_(msg)
+ def test_notify_b10_init(self):
+ self.cm.notify_b10_init()
+ msg = self.fake_session.get_message("Init", None)
+ self.assertTrue(msg)
# this one is actually wrong, but 'current status quo'
self.assertEqual(msg, {"running": "ConfigManager"})
def test_set_module_spec(self):
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
- self.assert_(module_spec.get_module_name() not in
- self.cm.virtual_modules)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.virtual_modules)
def test_remove_module_spec(self):
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.remove_module_spec(module_spec.get_module_name())
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
- self.assert_(module_spec.get_module_name() not in
- self.cm.virtual_modules)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.virtual_modules)
def test_add_remove_virtual_module(self):
module_spec = isc.config.module_spec.module_spec_from_file(
self.data_path + os.sep + "spec1.spec")
check_func = lambda: True
# Make sure it's not in there before
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
- self.assert_(module_spec.get_module_name() not in
- self.cm.virtual_modules)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.virtual_modules)
# Add it there
self.cm.set_virtual_module(module_spec, check_func)
# Check it's in there
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
self.assertEqual(self.cm.module_specs[module_spec.get_module_name()],
module_spec)
self.assertEqual(self.cm.virtual_modules[module_spec.get_module_name()],
check_func)
# Remove it again
self.cm.remove_module_spec(module_spec.get_module_name())
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
- self.assert_(module_spec.get_module_name() not in
- self.cm.virtual_modules)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.virtual_modules)
def test_get_module_spec(self):
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
module_spec2 = self.cm.get_module_spec(module_spec.get_module_name())
self.assertEqual(module_spec.get_full_spec(), module_spec2)
@@ -238,16 +277,16 @@ class TestConfigManager(unittest.TestCase):
config_spec = self.cm.get_config_spec()
self.assertEqual(config_spec, {})
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
config_spec = self.cm.get_config_spec()
self.assertEqual(config_spec, { 'Spec1': None })
self.cm.remove_module_spec('Spec1')
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
config_spec = self.cm.get_config_spec()
self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
config_spec = self.cm.get_config_spec('Spec2')
@@ -258,16 +297,16 @@ class TestConfigManager(unittest.TestCase):
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, {})
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec, { 'Spec1': None })
self.cm.remove_module_spec('Spec1')
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
commands_spec = self.cm.get_commands_spec()
self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
commands_spec = self.cm.get_commands_spec('Spec2')
@@ -277,16 +316,16 @@ class TestConfigManager(unittest.TestCase):
statistics_spec = self.cm.get_statistics_spec()
self.assertEqual(statistics_spec, {})
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
statistics_spec = self.cm.get_statistics_spec()
self.assertEqual(statistics_spec, { 'Spec1': None })
self.cm.remove_module_spec('Spec1')
module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
- self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+ self.assertNotIn(module_spec.get_module_name(), self.cm.module_specs)
self.cm.set_module_spec(module_spec)
- self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+ self.assertIn(module_spec.get_module_name(), self.cm.module_specs)
statistics_spec = self.cm.get_statistics_spec()
self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
statistics_spec = self.cm.get_statistics_spec('Spec2')
@@ -543,7 +582,8 @@ class TestConfigManager(unittest.TestCase):
def test_set_config_all(self):
my_ok_answer = { 'result': [ 0 ] }
- self.assertEqual({"version": 2}, self.cm.config.data)
+ self.assertEqual({"version": config_data.BIND10_CONFIG_DATA_VERSION},
+ self.cm.config.data)
self.fake_session.group_sendmsg(my_ok_answer, "ConfigManager")
self.cm.handle_msg(ccsession.create_command(
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index 45feb35..ddeabb6 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -360,8 +360,8 @@ class TestMultiConfigData(unittest.TestCase):
self.assertFalse(self.mcd.have_specification(module_spec.get_module_name()))
self.mcd.set_specification(module_spec)
self.assertTrue(self.mcd.have_specification(module_spec.get_module_name()))
- self.assert_(module_spec.get_module_name() in self.mcd._specifications)
- self.assertEquals(module_spec, self.mcd._specifications[module_spec.get_module_name()])
+ self.assertIn(module_spec.get_module_name(), self.mcd._specifications)
+ self.assertEqual(module_spec, self.mcd._specifications[module_spec.get_module_name()])
self.assertRaises(ConfigDataError, self.mcd.set_specification, "asdf")
self.mcd.remove_specification(module_spec.get_module_name())
self.assertFalse(self.mcd.have_specification(module_spec.get_module_name()))
@@ -693,12 +693,15 @@ class TestMultiConfigData(unittest.TestCase):
'name': 'Spec32', 'value': None,
'modified': False}], maps)
maps = self.mcd.get_value_maps("/Spec32/named_set_item")
- self.assertEqual([{'default': True, 'type': 'integer',
- 'name': 'Spec32/named_set_item/a',
- 'value': 1, 'modified': False},
- {'default': True, 'type': 'integer',
- 'name': 'Spec32/named_set_item/b',
- 'value': 2, 'modified': False}], maps)
+ self.assertEqual(len(maps), 2)
+ self.assertIn({'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/a',
+ 'value': 1, 'modified': False},
+ maps)
+ self.assertIn({'default': True, 'type': 'integer',
+ 'name': 'Spec32/named_set_item/b',
+ 'value': 2, 'modified': False},
+ maps)
maps = self.mcd.get_value_maps("/Spec32/named_set_item/a")
self.assertEqual([{'default': True, 'type': 'integer',
'name': 'Spec32/named_set_item/a',
@@ -829,10 +832,10 @@ class TestMultiConfigData(unittest.TestCase):
"bbbb": 6})
config_items = self.mcd.get_config_item_list("/Spec32/named_set_item",
True)
- self.assertEqual(['Spec32/named_set_item/aaaa',
- 'Spec32/named_set_item/aabb',
- 'Spec32/named_set_item/bbbb',
- ], config_items)
+ self.assertEqual(len(config_items), 3)
+ self.assertIn('Spec32/named_set_item/aaaa', config_items)
+ self.assertIn('Spec32/named_set_item/aabb', config_items)
+ self.assertIn('Spec32/named_set_item/bbbb', config_items)
self.mcd.set_value('Spec32/named_set_item', {})
config_items = self.mcd.get_config_item_list("/Spec32/named_set_item",
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index a2a2daf..4767860 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -38,7 +38,7 @@ class TestModuleSpec(unittest.TestCase):
def spec1(self, dd):
module_spec = dd.get_full_spec()
- self.assert_('module_name' in module_spec)
+ self.assertIn('module_name', module_spec)
self.assertEqual(module_spec['module_name'], "Spec1")
def test_open_file_name(self):
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 9e4bd42..a30ae38 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -27,7 +27,7 @@
#include <datasrc/database.h>
#include <datasrc/data_source.h>
#include <datasrc/sqlite3_accessor.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/client_list.h>
#include <dns/python/name_python.h>
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 1b0e3d1..05c44c9 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -26,7 +26,7 @@
#include <datasrc/database.h>
#include <datasrc/data_source.h>
#include <datasrc/sqlite3_accessor.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <datasrc/zone.h>
#include <dns/python/name_python.h>
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
index 9e6900c..9757a3b 100644
--- a/src/lib/python/isc/datasrc/iterator_python.cc
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -25,7 +25,7 @@
#include <datasrc/client.h>
#include <datasrc/database.h>
#include <datasrc/sqlite3_accessor.h>
-#include <datasrc/iterator.h>
+#include <datasrc/zone_iterator.h>
#include <dns/python/name_python.h>
#include <dns/python/rrset_python.h>
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index dc80afd..19d8581 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -25,7 +25,7 @@ RR_RDATA_INDEX = 7
# Current major and minor versions of schema
SCHEMA_MAJOR_VERSION = 2
-SCHEMA_MINOR_VERSION = 1
+SCHEMA_MINOR_VERSION = 2
class Sqlite3DSError(Exception):
""" Define exceptions."""
@@ -73,6 +73,8 @@ def create(cur):
cur.execute("CREATE INDEX records_byrname ON records (rname)")
cur.execute("""CREATE INDEX records_bytype_and_rname ON records
(rdtype, rname)""")
+ cur.execute("""CREATE INDEX records_byrname_and_rdtype ON records
+ (rname, rdtype)""")
cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
hash TEXT NOT NULL COLLATE NOCASE,
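For illustration, a minimal sqlite3 sketch of the new (rname, rdtype) index added above; the cut-down records table here is a stand-in, not the full BIND 10 schema, and EXPLAIN QUERY PLAN is used only to confirm the index can serve rname+rdtype lookups:

    # In-memory sketch of the new (rname, rdtype) index; the records table
    # here is a simplified stand-in, not the full BIND 10 sqlite3 schema.
    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
                                         rname TEXT, rdtype TEXT, rdata TEXT)""")
    cur.execute("""CREATE INDEX records_byrname_and_rdtype ON records
                   (rname, rdtype)""")
    cur.execute("INSERT INTO records (rname, rdtype, rdata) VALUES (?, ?, ?)",
                ("com.example.www.", "A", "192.0.2.1"))

    # EXPLAIN QUERY PLAN should report a search using the new index
    for row in cur.execute("""EXPLAIN QUERY PLAN
                              SELECT rdata FROM records
                              WHERE rname = ? AND rdtype = ?""",
                           ("com.example.www.", "A")):
        print(row)
    conn.close()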
diff --git a/src/lib/python/isc/datasrc/tests/clientlist_test.py b/src/lib/python/isc/datasrc/tests/clientlist_test.py
index ea39d4e..bdac69c 100644
--- a/src/lib/python/isc/datasrc/tests/clientlist_test.py
+++ b/src/lib/python/isc/datasrc/tests/clientlist_test.py
@@ -43,8 +43,8 @@ class ClientListTest(unittest.TestCase):
Test the constructor. It should accept an RRClass. Check it
reject invalid inputs.
"""
- isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
- isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH())
+ isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
+ isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH)
# Not enough arguments
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList)
# Bad types of arguments
@@ -52,7 +52,7 @@ class ClientListTest(unittest.TestCase):
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList, "IN")
# Too many arguments
self.assertRaises(TypeError, isc.datasrc.ConfigurableClientList,
- isc.dns.RRClass.IN(), isc.dns.RRClass.IN())
+ isc.dns.RRClass.IN, isc.dns.RRClass.IN)
def test_configure(self):
"""
@@ -60,7 +60,7 @@ class ClientListTest(unittest.TestCase):
ones are accepted and invalid ones rejected. We check the changes
have effect.
"""
- self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
+ self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
# This should be NOP now
self.clist.configure("[]", True)
# Check the zone is not there yet
@@ -102,7 +102,7 @@ class ClientListTest(unittest.TestCase):
Test the find accepts the right arguments, some of them can be omitted,
etc.
"""
- self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN())
+ self.clist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.IN)
self.clist.configure('''[{
"type": "MasterFiles",
"params": {
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 36cf951..64f3e53 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -51,8 +51,8 @@ def check_for_rrset(expected_rrsets, rrset):
return False
def create_soa(serial):
- soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
- soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+ soa = RRset(Name('example.org'), RRClass.IN, RRType.SOA, RRTTL(3600))
+ soa.add_rdata(Rdata(RRType.SOA, RRClass.IN,
'ns1.example.org. admin.example.org. ' +
str(serial) + ' 3600 1800 2419200 7200'))
return soa
@@ -66,13 +66,13 @@ def test_findall_common(self, tested):
result, rrset, _ = tested.find_all(isc.dns.Name("www.sql1.example.com"),
ZoneFinder.FIND_DEFAULT)
self.assertEqual(ZoneFinder.DELEGATION, result)
- expected = RRset(Name('sql1.example.com.'), RRClass.IN(), RRType.NS(),
+ expected = RRset(Name('sql1.example.com.'), RRClass.IN, RRType.NS,
RRTTL(3600))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns01.example.com.'))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns02.example.com.'))
- expected.add_rdata(Rdata(RRType.NS(), RRClass.IN(),
+ expected.add_rdata(Rdata(RRType.NS, RRClass.IN,
'dns03.example.com.'))
self.assertTrue(rrsets_equal(expected, rrset))
@@ -88,16 +88,16 @@ def test_findall_common(self, tested):
self.assertEqual(2, len(rrsets))
rrsets.sort(key=lambda rrset: rrset.get_type().to_text())
expected = [
- RRset(Name('mix.example.com.'), RRClass.IN(), RRType.A(),
+ RRset(Name('mix.example.com.'), RRClass.IN, RRType.A,
RRTTL(3600)),
- RRset(Name('mix.example.com.'), RRClass.IN(), RRType.AAAA(),
+ RRset(Name('mix.example.com.'), RRClass.IN, RRType.AAAA,
RRTTL(3600))
]
- expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
- expected[0].add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.2"))
- expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ expected[0].add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.1"))
+ expected[0].add_rdata(Rdata(RRType.A, RRClass.IN, "192.0.2.2"))
+ expected[1].add_rdata(Rdata(RRType.AAAA, RRClass.IN,
"2001:db8::1"))
- expected[1].add_rdata(Rdata(RRType.AAAA(), RRClass.IN(),
+ expected[1].add_rdata(Rdata(RRType.AAAA, RRClass.IN,
"2001:db8::2"))
for (rrset, exp) in zip(rrsets, expected):
self.assertTrue(rrsets_equal(exp, rrset))
@@ -158,9 +158,9 @@ class DataSrcClient(unittest.TestCase):
expected_rrset_list = []
name = isc.dns.Name("sql1.example.com")
- rrclass = isc.dns.RRClass.IN()
+ rrclass = isc.dns.RRClass.IN
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.DNSKEY, isc.dns.RRTTL(3600),
[
"256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
"N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
@@ -168,7 +168,7 @@ class DataSrcClient(unittest.TestCase):
"5fs0dE/xLztL/CzZ"
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.DNSKEY, isc.dns.RRTTL(3600),
[
"257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
"KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
@@ -179,22 +179,22 @@ class DataSrcClient(unittest.TestCase):
"jRWAzGsxJiJyjd6w2k0="
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns01.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns02.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.NS, isc.dns.RRTTL(3600),
[
"dns03.example.com."
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ isc.dns.RRType.NSEC, isc.dns.RRTTL(7200),
[
"www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
])
@@ -204,36 +204,36 @@ class DataSrcClient(unittest.TestCase):
# Since we passed separate_rrs = True to get_iterator, we get several
# sets of RRSIGs, one for each TTL
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(7200), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.SOA, isc.dns.RRTTL(3600),
[
"master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
])
name = isc.dns.Name("www.sql1.example.com.")
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+ isc.dns.RRType.A, isc.dns.RRTTL(3600),
[
"192.0.2.100"
])
name = isc.dns.Name("www.sql1.example.com.")
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+ isc.dns.RRType.NSEC, isc.dns.RRTTL(7200),
[
"sql1.example.com. A RRSIG NSEC"
])
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(3600), None)
add_rrset(expected_rrset_list, name, rrclass,
- isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+ isc.dns.RRType.RRSIG, isc.dns.RRTTL(7200), None)
# rrs is an iterator, but also has direct get_next_rrset(), use
# the latter one here
@@ -287,11 +287,11 @@ class DataSrcClient(unittest.TestCase):
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.SOA(),
+ isc.dns.RRClass.IN,
+ isc.dns.RRType.SOA,
isc.dns.RRTTL(3600))
- expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
- isc.dns.RRClass.IN(),
+ expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA,
+ isc.dns.RRClass.IN,
"master.example.com. " +
"admin.example.com. 678 " +
"3600 1800 2419200 7200"))
@@ -337,7 +337,7 @@ class DataSrcClient(unittest.TestCase):
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
test_findall_common(self, finder)
@@ -347,11 +347,11 @@ class DataSrcClient(unittest.TestCase):
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -359,13 +359,13 @@ class DataSrcClient(unittest.TestCase):
# Check the optional parameters are optional
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.sql1.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.DELEGATION, result)
self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
@@ -374,7 +374,7 @@ class DataSrcClient(unittest.TestCase):
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("doesnotexist.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -382,16 +382,16 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(isc.datasrc.OutOfZone, finder.find,
isc.dns.Name("www.some.other.domain"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.TXT(),
+ isc.dns.RRType.TXT,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXRRSET, result)
self.assertEqual(None, rrset)
result, rrset, _ = finder.find(isc.dns.Name("cname-ext.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.CNAME, result)
self.assertEqual(
@@ -400,14 +400,14 @@ class DataSrcClient(unittest.TestCase):
result, rrset, flags = \
finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.A(), finder.FIND_DEFAULT)
+ isc.dns.RRType.A, finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(finder.RESULT_WILDCARD, flags)
self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
rrset.to_text())
result, rrset, _ = finder.find(isc.dns.Name("foo.wild.example.com"),
- isc.dns.RRType.TXT(),
+ isc.dns.RRType.TXT,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXRRSET, result)
self.assertTrue(finder.RESULT_WILDCARD, flags)
@@ -415,7 +415,7 @@ class DataSrcClient(unittest.TestCase):
self.assertRaises(TypeError, finder.find,
"foo",
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
@@ -423,7 +423,7 @@ class DataSrcClient(unittest.TestCase):
finder.FIND_DEFAULT)
self.assertRaises(TypeError, finder.find,
isc.dns.Name("cname-ext.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
"foo")
class DataSrcUpdater(unittest.TestCase):
@@ -451,7 +451,7 @@ class DataSrcUpdater(unittest.TestCase):
dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
updater = dsc.get_updater(isc.dns.Name("example.com"), False)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
ZoneFinder.FIND_DEFAULT)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -459,7 +459,7 @@ class DataSrcUpdater(unittest.TestCase):
# Omit optional parameters
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A())
+ isc.dns.RRType.A)
self.assertEqual(ZoneFinder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
rrset.to_text())
@@ -471,11 +471,11 @@ class DataSrcUpdater(unittest.TestCase):
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -490,13 +490,13 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -508,7 +508,7 @@ class DataSrcUpdater(unittest.TestCase):
# the record should be gone now in the 'real' finder as well
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -522,7 +522,7 @@ class DataSrcUpdater(unittest.TestCase):
self.assertRaises(isc.datasrc.Error, updater.commit)
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -537,26 +537,26 @@ class DataSrcUpdater(unittest.TestCase):
rrsets = updater.get_rrset_collection()
# From this point we cannot make further updates
- rrset = RRset(isc.dns.Name('www.example.com'), isc.dns.RRClass.IN(),
- isc.dns.RRType.AAAA(), isc.dns.RRTTL(10))
- rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.AAAA(),
- isc.dns.RRClass.IN(), '2001:db8::1'))
+ rrset = RRset(isc.dns.Name('www.example.com'), isc.dns.RRClass.IN,
+ isc.dns.RRType.AAAA, isc.dns.RRTTL(10))
+ rrset.add_rdata(isc.dns.Rdata(isc.dns.RRType.AAAA,
+ isc.dns.RRClass.IN, '2001:db8::1'))
self.assertRaises(isc.datasrc.Error, updater.add_rrset, rrset)
# Checks basic API
found = rrsets.find(isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A())
+ isc.dns.RRClass.IN, isc.dns.RRType.A)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
found.to_text())
self.assertEqual(None, rrsets.find(isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(),
- isc.dns.RRType.AAAA()))
+ isc.dns.RRClass.IN,
+ isc.dns.RRType.AAAA))
# Once committed collection cannot be used any more.
updater.commit()
self.assertRaises(isc.dns.RRsetCollectionError,
rrsets.find, isc.dns.Name("www.example.com"),
- isc.dns.RRClass.IN(), isc.dns.RRType.A())
+ isc.dns.RRClass.IN, isc.dns.RRType.A)
# When we destroy the RRsetCollection it should release the refcount
# to the updater.
@@ -578,10 +578,10 @@ class DataSrcUpdater(unittest.TestCase):
# see if a lookup succeeds in sqlite3 ds
result, finder = dsc_sql.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -600,11 +600,11 @@ class DataSrcUpdater(unittest.TestCase):
# first make sure, through a separate finder, that some record exists
result, finder = dsc.find_zone(isc.dns.Name("example.com"))
self.assertEqual(finder.SUCCESS, result)
- self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+ self.assertEqual(isc.dns.RRClass.IN, finder.get_class())
self.assertEqual("example.com.", finder.get_origin().to_text())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -619,7 +619,7 @@ class DataSrcUpdater(unittest.TestCase):
# The record should be gone in the updater, but not in the original
# finder (since we have not committed)
result, rrset, _ = updater.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
@@ -629,7 +629,7 @@ class DataSrcUpdater(unittest.TestCase):
# the record should still be available in the 'real' finder as well
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
- isc.dns.RRType.A(),
+ isc.dns.RRType.A,
finder.FIND_DEFAULT)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
@@ -755,9 +755,9 @@ class JournalWrite(unittest.TestCase):
conn.close()
def create_a(self, address):
- a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+ a_rr = RRset(Name('www.example.org'), RRClass.IN, RRType.A,
RRTTL(3600))
- a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ a_rr.add_rdata(Rdata(RRType.A, RRClass.IN, address))
return (a_rr)
def test_journal_write(self):
diff --git a/src/lib/python/isc/datasrc/tests/zone_loader_test.py b/src/lib/python/isc/datasrc/tests/zone_loader_test.py
index 62f67cd..4cd4879 100644
--- a/src/lib/python/isc/datasrc/tests/zone_loader_test.py
+++ b/src/lib/python/isc/datasrc/tests/zone_loader_test.py
@@ -13,6 +13,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+import isc.log
import isc.datasrc
import isc.dns
@@ -96,7 +97,7 @@ class ZoneLoaderTests(unittest.TestCase):
"""
result, finder = self.client.find_zone(self.test_name)
self.assertEqual(self.client.SUCCESS, result)
- result, rrset, _ = finder.find(self.test_name, isc.dns.RRType.SOA())
+ result, rrset, _ = finder.find(self.test_name, isc.dns.RRType.SOA)
self.assertEqual(finder.SUCCESS, result)
self.assertEqual(soa_txt, rrset.to_text())
@@ -231,7 +232,7 @@ class ZoneLoaderTests(unittest.TestCase):
def test_wrong_class_from_client(self):
# For ds->ds loading, wrong class is detected upon construction
# Need a bit of the extended setup for CH source client
- clientlist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH())
+ clientlist = isc.datasrc.ConfigurableClientList(isc.dns.RRClass.CH)
clientlist.configure('[ { "type": "static", "params": "' +
STATIC_ZONE_FILE +'" } ]', False)
self.source_client, _, _ = clientlist.find(isc.dns.Name("bind."),
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
index f040f85..87c8158 100644
--- a/src/lib/python/isc/datasrc/updater_inc.cc
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -187,10 +187,9 @@ Exceptions:\n\
\n\
";
-// Modifications
-// - isc.datasrc.RRsetCollectionBase => isc.dns.RRsetCollectionBase
-// (in the Python wrapper, the former is completely invisible)
-// - remove other reference to isc.datasrc.RRsetCollectionBase
+// Modifications:
+// - remove reference to isc.datasrc.RRsetCollectionBase (hidden for Python
+// wrapper)
const char* const ZoneUpdater_getRRsetCollection_doc = "\
get_rrset_collection() -> isc.dns.RRsetCollectionBase \n\
\n\
@@ -206,10 +205,46 @@ RRsetCollection returned has a behavior dependent on the ZoneUpdater\n\
implementation.\n\
\n\
The behavior of the RRsetCollection is similar to the behavior of the\n\
-Zonefinder returned by get_finder(). Implementations of ZoneUpdater\n\
-may not allow adding or deleting RRsets after get_rrset_collection()\n\
-is called. Implementations of ZoneUpdater may disable a previously\n\
-returned RRsetCollection after commit() is called. If an\n\
+Zonefinder returned by get_finder(). In fact, it's redundant in a\n\
+sense because one can implement the dns.RRsetCollectionBase interface\n\
+using an updater and get_finder() interface (unless it's expected to\n\
+support zone iteration, and the initial implementation of the\n\
+RRsetCollection returned by this method doesn't support it). We\n\
+still provide it as an updater's method so it will be easier for an\n\
+updater implementation to customize the RRsetCollection\n\
+implementation, and also for making it easy to impose restrictions\n\
+described below.\n\
+\n\
+Specific data sources may have special restrictions. That's especially\n\
+the case for database-based data sources. Such restrictions may also\n\
+result in limiting the usage of the RRsetCollection as described in\n\
+the following paragraphs. A specific updater implementation may\n\
+provide more flexible behavior, but applications using this interface\n\
+must assume the most restricted case unless they know they use a\n\
+particular specialized updater implementation that loosens specific\n\
+restrictions.\n\
+\n\
+- An application must not add or delete RRsets after\n\
+ get_rrset_collection() is called.\n\
+- An application must not use the returned collection from\n\
+ get_rrset_collection() once commit() is called on the updater that\n\
+ generates the collection.\n\
+\n\
+Implementations of ZoneUpdater may not allow adding or deleting RRsets\n\
+after get_rrset_collection() is called. This is because if an\n\
+iterator of the collection is being used at that time the modification\n\
+to the zone may break an internal assumption of the iterator and may\n\
+result in unexpected behavior. Also, the iterator may conceptually\n\
+hold a \"reader lock\" of the zone (in an implementation dependent\n\
+manner), which would prevent the addition or deletion, surprising the\n\
+caller (who would normally expect it to succeed).\n\
+\n\
+Implementations of ZoneUpdater may disable a previously returned\n\
+RRsetCollection after commit() is called. This is because the returned\n\
+RRsetCollection may internally rely on the conceptual transaction of\n\
+the updater that generates the collection (which would be literally\n\
+the case for database-based data sources), and once the transaction is\n\
+committed anything that relies on it won't be valid. If an\n\
RRsetCollection is disabled, using methods such as find() and using\n\
its iterator would cause an exception to be thrown.\n\
\n\
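For illustration, a hedged usage sketch of the ordering documented above, mirroring the calls exercised in datasrc_test.py earlier in this diff; it assumes the isc.datasrc/isc.dns Python bindings are importable and that zone.sqlite3 (an illustrative path and config string, not a recommended value) holds example.com:

    # Hedged usage sketch of the documented ordering; assumes the BIND 10
    # Python bindings are available and 'zone.sqlite3' contains example.com.
    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient("sqlite3",
                                          '{ "database_file": "zone.sqlite3" }')
    updater = client.get_updater(isc.dns.Name("example.com"), False)

    # Apply any add_rrset()/delete_rrset() calls first; once the collection
    # is taken, further modifications may be refused by the updater.
    rrsets = updater.get_rrset_collection()
    found = rrsets.find(isc.dns.Name("www.example.com"),
                        isc.dns.RRClass.IN, isc.dns.RRType.A)
    if found is not None:
        print(found.to_text())

    # After commit() the collection may be disabled; find() would then raise
    # isc.dns.RRsetCollectionError, so stop using 'rrsets' at this point.
    updater.commit()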
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
index cb727c3..e61db75 100644
--- a/src/lib/python/isc/datasrc/updater_python.cc
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -203,7 +203,7 @@ namespace {
class s_UpdaterRRsetCollection : public s_RRsetCollection {
public:
- s_UpdaterRRsetCollection() : s_RRsetCollection() {}
+ s_UpdaterRRsetCollection() : s_RRsetCollection(), base_obj_(NULL) {}
PyObject* base_obj_;
};
diff --git a/src/lib/python/isc/ddns/libddns_messages.mes b/src/lib/python/isc/ddns/libddns_messages.mes
index 406151c..abdd4e0 100644
--- a/src/lib/python/isc/ddns/libddns_messages.mes
+++ b/src/lib/python/isc/ddns/libddns_messages.mes
@@ -121,7 +121,7 @@ a bad class. The class of the update RRset must be either the same
as the class in the Zone Section, ANY, or NONE.
A FORMERR response is sent back to the client.
-% LIBDDNS_UPDATE_DATASRC_ERROR error in datasource during DDNS update: %1
+% LIBDDNS_UPDATE_DATASRC_COMMIT_FAILED error in datasource during DDNS update: %1
An error occurred while committing the DDNS update changes to the
datasource. The specific error is printed. A SERVFAIL response is sent
back to the client.
@@ -167,7 +167,7 @@ rejected by the zone's update ACL. When this library is used by
b10-ddns, the server will then completely ignore the request; no
response will be sent.
-% LIBDDNS_UPDATE_ERROR update client %1 for zone %2: %3
+% LIBDDNS_UPDATE_PROCESSING_FAILED update client %1 for zone %2: %3
Debug message. An error is found in processing a dynamic update
request. This log message is used for general errors that are not
normally expected to happen. So, in general, it would mean some
diff --git a/src/lib/python/isc/ddns/session.py b/src/lib/python/isc/ddns/session.py
index 60834fb..3368523 100644
--- a/src/lib/python/isc/ddns/session.py
+++ b/src/lib/python/isc/ddns/session.py
@@ -135,7 +135,7 @@ class DDNS_SOA:
def __write_soa_internal(self, origin_soa, soa_num):
'''Write back serial number to soa'''
new_soa = RRset(origin_soa.get_name(), origin_soa.get_class(),
- RRType.SOA(), origin_soa.get_ttl())
+ RRType.SOA, origin_soa.get_ttl())
soa_rdata_parts = origin_soa.get_rdata()[0].to_text().split()
soa_rdata_parts[2] = str(soa_num.get_value())
new_soa.add_rdata(Rdata(origin_soa.get_type(), origin_soa.get_class(),
@@ -248,18 +248,18 @@ class UpdateSession:
self.__check_update_acl(self.__zname, self.__zclass)
self._create_diff()
prereq_result = self.__check_prerequisites()
- if prereq_result != Rcode.NOERROR():
+ if prereq_result != Rcode.NOERROR:
self.__make_response(prereq_result)
return UPDATE_ERROR, self.__zname, self.__zclass
update_result = self.__do_update()
- if update_result != Rcode.NOERROR():
+ if update_result != Rcode.NOERROR:
self.__make_response(update_result)
return UPDATE_ERROR, self.__zname, self.__zclass
- self.__make_response(Rcode.NOERROR())
+ self.__make_response(Rcode.NOERROR)
return UPDATE_SUCCESS, self.__zname, self.__zclass
except UpdateError as e:
if not e.nolog:
- logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_ERROR,
+ logger.debug(logger.DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_PROCESSING_FAILED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(e.zname, e.zclass), e)
# If RCODE is specified, create a corresponding response and return
@@ -272,7 +272,7 @@ class UpdateSession:
except isc.datasrc.Error as e:
logger.error(LIBDDNS_DATASRC_ERROR,
ClientFormatter(self.__client_addr, self.__tsig), e)
- self.__make_response(Rcode.SERVFAIL())
+ self.__make_response(Rcode.SERVFAIL)
return UPDATE_ERROR, None, None
def _get_update_zone(self):
@@ -295,11 +295,11 @@ class UpdateSession:
n_zones = self.__message.get_rr_count(SECTION_ZONE)
if n_zones != 1:
raise UpdateError('Invalid number of records in zone section: ' +
- str(n_zones), None, None, Rcode.FORMERR())
+ str(n_zones), None, None, Rcode.FORMERR)
zrecord = self.__message.get_question()[0]
- if zrecord.get_type() != RRType.SOA():
+ if zrecord.get_type() != RRType.SOA:
raise UpdateError('update zone section contains non-SOA',
- None, None, Rcode.FORMERR())
+ None, None, Rcode.FORMERR)
# See if we're serving a primary zone specified in the zone section.
zname = zrecord.get_name()
@@ -316,12 +316,12 @@ class UpdateSession:
logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_FORWARD_FAIL,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('forward', zname, zclass, Rcode.NOTIMP(), True)
+ raise UpdateError('forward', zname, zclass, Rcode.NOTIMP, True)
# zone wasn't found
logger.debug(DBGLVL_TRACE_BASIC, LIBDDNS_UPDATE_NOTAUTH,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH(), True)
+ raise UpdateError('notauth', zname, zclass, Rcode.NOTAUTH, True)
def _create_diff(self):
'''
@@ -352,7 +352,7 @@ class UpdateSession:
logger.info(LIBDDNS_UPDATE_DENIED,
ClientFormatter(self.__client_addr, self.__tsig),
ZoneFormatter(zname, zclass))
- raise UpdateError('rejected', zname, zclass, Rcode.REFUSED(), True)
+ raise UpdateError('rejected', zname, zclass, Rcode.REFUSED, True)
if action == DROP:
logger.info(LIBDDNS_UPDATE_DROPPED,
ClientFormatter(self.__client_addr, self.__tsig),
@@ -459,7 +459,7 @@ class UpdateSession:
def __check_prerequisites(self):
'''Check the prerequisites section of the UPDATE Message.
RFC2136 Section 2.4.
- Returns a dns Rcode signaling either no error (Rcode.NOERROR())
+ Returns a dns Rcode signaling either no error (Rcode.NOERROR)
or that one of the prerequisites failed (any other Rcode).
'''
@@ -473,20 +473,20 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.NOTZONE()
+ return Rcode.NOTZONE
# Algorithm taken from RFC2136 Section 3.2
- if rrset.get_class() == RRClass.ANY():
+ if rrset.get_class() == RRClass.ANY:
if rrset.get_ttl().get_value() != 0 or\
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_ANY,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_type() == RRType.ANY():
+ return Rcode.FORMERR
+ elif rrset.get_type() == RRType.ANY:
if not self.__prereq_name_in_use(rrset):
- rcode = Rcode.NXDOMAIN()
+ rcode = Rcode.NXDOMAIN
logger.info(LIBDDNS_PREREQ_NAME_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -494,23 +494,23 @@ class UpdateSession:
return rcode
else:
if not self.__prereq_rrset_exists(rrset):
- rcode = Rcode.NXRRSET()
+ rcode = Rcode.NXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset), rcode)
return rcode
- elif rrset.get_class() == RRClass.NONE():
+ elif rrset.get_class() == RRClass.NONE:
if rrset.get_ttl().get_value() != 0 or\
rrset.get_rdata_count() != 0:
logger.info(LIBDDNS_PREREQ_FORMERR_NONE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_type() == RRType.ANY():
+ return Rcode.FORMERR
+ elif rrset.get_type() == RRType.ANY:
if not self.__prereq_name_not_in_use(rrset):
- rcode = Rcode.YXDOMAIN()
+ rcode = Rcode.YXDOMAIN
logger.info(LIBDDNS_PREREQ_NAME_NOT_IN_USE_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -518,7 +518,7 @@ class UpdateSession:
return rcode
else:
if not self.__prereq_rrset_does_not_exist(rrset):
- rcode = Rcode.YXRRSET()
+ rcode = Rcode.YXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_DOES_NOT_EXIST_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -530,7 +530,7 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
else:
collect_rrsets(exact_match_rrsets, rrset)
else:
@@ -538,11 +538,11 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
for collected_rrset in exact_match_rrsets:
if not self.__prereq_rrset_exists_value(collected_rrset):
- rcode = Rcode.NXRRSET()
+ rcode = Rcode.NXRRSET
logger.info(LIBDDNS_PREREQ_RRSET_EXISTS_VAL_FAILED,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
@@ -550,7 +550,7 @@ class UpdateSession:
return rcode
# All prerequisites are satisfied
- return Rcode.NOERROR()
+ return Rcode.NOERROR
def __set_soa_rrset(self, rrset):
'''Sets the given rrset to the member __added_soa (which
@@ -570,7 +570,7 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.NOTZONE()
+ return Rcode.NOTZONE
if rrset.get_class() == self.__zclass:
# In fact, all metatypes are in a specific range,
# so one check can test TKEY to ANY
@@ -581,52 +581,52 @@ class UpdateSession:
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- if rrset.get_type() == RRType.SOA():
+ return Rcode.FORMERR
+ if rrset.get_type() == RRType.SOA:
# In case there are multiple SOA records in the update
# somehow, just take the last
for rr in foreach_rr(rrset):
self.__set_soa_rrset(rr)
- elif rrset.get_class() == RRClass.ANY():
+ elif rrset.get_class() == RRClass.ANY:
if rrset.get_ttl().get_value() != 0:
logger.info(LIBDDNS_UPDATE_DELETE_NONZERO_TTL,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_rdata_count() > 0:
logger.info(LIBDDNS_UPDATE_DELETE_RRSET_NOT_EMPTY,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_type().get_code() >= 249 and\
rrset.get_type().get_code() <= 254:
logger.info(LIBDDNS_UPDATE_DELETE_BAD_TYPE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- elif rrset.get_class() == RRClass.NONE():
+ return Rcode.FORMERR
+ elif rrset.get_class() == RRClass.NONE:
if rrset.get_ttl().get_value() != 0:
logger.info(LIBDDNS_UPDATE_DELETE_RR_NONZERO_TTL,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
if rrset.get_type().get_code() >= 249:
logger.info(LIBDDNS_UPDATE_DELETE_RR_BAD_TYPE,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
+ return Rcode.FORMERR
else:
logger.info(LIBDDNS_UPDATE_BAD_CLASS,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
RRsetFormatter(rrset))
- return Rcode.FORMERR()
- return Rcode.NOERROR()
+ return Rcode.FORMERR
+ return Rcode.NOERROR
def __do_update_add_single_rr(self, rr, existing_rrset):
'''Helper for __do_update_add_rrs_to_rrset: only add the
@@ -657,7 +657,7 @@ class UpdateSession:
# For a number of cases, we may need to remove data in the zone
# (note: SOA is handled separately by __do_update, so that one
# is explicitly ignored here)
- if rrset.get_type() == RRType.SOA():
+ if rrset.get_type() == RRType.SOA:
return
result, orig_rrset, _ = self.__diff.find(rrset.get_name(),
rrset.get_type())
@@ -668,7 +668,7 @@ class UpdateSession:
return
elif result == ZoneFinder.SUCCESS:
# if update is cname, and zone rr is not, ignore
- if rrset.get_type() == RRType.CNAME():
+ if rrset.get_type() == RRType.CNAME:
# Remove original CNAME record (the new one
# is added below)
self.__diff.delete_data(orig_rrset)
@@ -679,7 +679,7 @@ class UpdateSession:
elif result == ZoneFinder.NXRRSET:
# There is data present, but not for this type.
# If this type is CNAME, ignore the update
- if rrset.get_type() == RRType.CNAME():
+ if rrset.get_type() == RRType.CNAME:
return
for rr in foreach_rr(rrset):
self.__do_update_add_single_rr(rr, orig_rrset)
@@ -696,8 +696,8 @@ class UpdateSession:
rrset.get_type())
if result == ZoneFinder.SUCCESS:
if to_delete.get_name() == self.__zname and\
- (to_delete.get_type() == RRType.SOA() or\
- to_delete.get_type() == RRType.NS()):
+ (to_delete.get_type() == RRType.SOA or\
+ to_delete.get_type() == RRType.NS):
# ignore
return
for rr in foreach_rr(to_delete):
@@ -749,8 +749,8 @@ class UpdateSession:
for to_delete in rrsets:
# if name == self.__zname and type is soa or ns, don't delete!
if to_delete.get_name() == self.__zname and\
- (to_delete.get_type() == RRType.SOA() or
- to_delete.get_type() == RRType.NS()):
+ (to_delete.get_type() == RRType.SOA or
+ to_delete.get_type() == RRType.NS):
continue
else:
for rr in foreach_rr(to_delete):
@@ -771,10 +771,10 @@ class UpdateSession:
to_delete = convert_rrset_class(rrset, self.__zclass)
if rrset.get_name() == self.__zname:
- if rrset.get_type() == RRType.SOA():
+ if rrset.get_type() == RRType.SOA:
# ignore
return
- elif rrset.get_type() == RRType.NS():
+ elif rrset.get_type() == RRType.NS:
# hmm. okay. annoying. There must be at least one left,
# delegate to helper method
self.__ns_deleter_helper(to_delete)
@@ -793,14 +793,14 @@ class UpdateSession:
# serial magic and add the newly created one
# get it from DS and to increment and stuff
- result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA(),
+ result, old_soa, _ = self.__diff.find(self.__zname, RRType.SOA,
ZoneFinder.NO_WILDCARD |
ZoneFinder.FIND_GLUE_OK)
# We may implement recovering from missing SOA data at some point, but
# for now servfail on such a broken state
if result != ZoneFinder.SUCCESS:
raise UpdateError("Error finding SOA record in datasource.",
- self.__zname, self.__zclass, Rcode.SERVFAIL())
+ self.__zname, self.__zclass, Rcode.SERVFAIL)
serial_operation = DDNS_SOA()
if self.__added_soa is not None and\
serial_operation.soa_update_check(old_soa, self.__added_soa):
@@ -820,7 +820,7 @@ class UpdateSession:
'''
# prescan
prescan_result = self.__do_prescan()
- if prescan_result != Rcode.NOERROR():
+ if prescan_result != Rcode.NOERROR:
return prescan_result
# update
@@ -841,22 +841,22 @@ class UpdateSession:
for rrset in self.__message.get_section(SECTION_UPDATE):
if rrset.get_class() == self.__zclass:
self.__do_update_add_rrs_to_rrset(rrset)
- elif rrset.get_class() == RRClass.ANY():
- if rrset.get_type() == RRType.ANY():
+ elif rrset.get_class() == RRClass.ANY:
+ if rrset.get_type() == RRType.ANY:
self.__do_update_delete_name(rrset)
else:
self.__do_update_delete_rrset(rrset)
- elif rrset.get_class() == RRClass.NONE():
+ elif rrset.get_class() == RRClass.NONE:
self.__do_update_delete_rrs_from_rrset(rrset)
self.__diff.commit()
- return Rcode.NOERROR()
+ return Rcode.NOERROR
except isc.datasrc.Error as dse:
- logger.info(LIBDDNS_UPDATE_DATASRC_ERROR, dse)
- return Rcode.SERVFAIL()
+ logger.info(LIBDDNS_UPDATE_DATASRC_COMMIT_FAILED, dse)
+ return Rcode.SERVFAIL
except Exception as uce:
logger.error(LIBDDNS_UPDATE_UNCAUGHT_EXCEPTION,
ClientFormatter(self.__client_addr),
ZoneFormatter(self.__zname, self.__zclass),
uce)
- return Rcode.SERVFAIL()
+ return Rcode.SERVFAIL
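The change running through session.py above is mechanical: Rcode, RRType, RRClass and Opcode values are now compared against class-level constants rather than constructed through factory-method calls. A minimal stand-in sketch of the new idiom (the Rcode class below is a placeholder for illustration, not the real isc.dns binding):

# Placeholder class for illustration only; the real constants come from isc.dns.
class Rcode:
    NOERROR = 0
    FORMERR = 1

def prescan_ok(rcode):
    # New style: compare directly against the class attribute
    # (the old code would have called Rcode.NOERROR() instead).
    return rcode == Rcode.NOERROR

print(prescan_ok(Rcode.NOERROR))   # True
print(prescan_ok(Rcode.FORMERR))   # False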
diff --git a/src/lib/python/isc/ddns/tests/session_tests.py b/src/lib/python/isc/ddns/tests/session_tests.py
index f7c2d3c..bc25310 100644
--- a/src/lib/python/isc/ddns/tests/session_tests.py
+++ b/src/lib/python/isc/ddns/tests/session_tests.py
@@ -30,8 +30,8 @@ WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
TEST_ZONE_NAME = Name('example.org')
-UPDATE_RRTYPE = RRType.SOA()
-TEST_RRCLASS = RRClass.IN()
+UPDATE_RRTYPE = RRType.SOA
+TEST_RRCLASS = RRClass.IN
TEST_ZONE_RECORD = Question(TEST_ZONE_NAME, TEST_RRCLASS, UPDATE_RRTYPE)
TEST_CLIENT6 = ('2001:db8::1', 53, 0, 0)
TEST_CLIENT4 = ('192.0.2.1', 53)
@@ -42,8 +42,8 @@ def create_update_msg(zones=[TEST_ZONE_RECORD], prerequisites=[],
updates=[], tsig_key=None):
msg = Message(Message.RENDER)
msg.set_qid(5353) # arbitrarily chosen
- msg.set_opcode(Opcode.UPDATE())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.UPDATE)
+ msg.set_rcode(Rcode.NOERROR)
for z in zones:
msg.add_question(z)
for p in prerequisites:
@@ -99,7 +99,7 @@ class SessionModuleTests(unittest.TestCase):
def test_foreach_rr_in_rrset(self):
rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
l = []
for rr in foreach_rr(rrset):
@@ -121,17 +121,17 @@ class SessionModuleTests(unittest.TestCase):
def test_convert_rrset_class(self):
# Converting an RRSET to a different class should work
# if the rdata types can be converted
- rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ rrset = create_rrset("www.example.org", RRClass.NONE, RRType.A,
3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
- rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ rrset2 = convert_rrset_class(rrset, RRClass.IN)
self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
"www.example.org. 3600 IN A 192.0.2.2\n",
str(rrset2))
- rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
- self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
- "c0000201\nwww.example.org. 3600 CLASS254 " +
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE)
+ self.assertEqual("www.example.org. 3600 NONE A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 NONE " +
"A \\# 4 c0000202\n",
str(rrset3))
@@ -140,10 +140,10 @@ class SessionModuleTests(unittest.TestCase):
# there was a ticket about making a better hierarchy for
# dns/parsing related exceptions)
self.assertRaises(InvalidRdataLength, convert_rrset_class,
- rrset, RRClass.CH())
+ rrset, RRClass.CH)
add_rdata(rrset, b'\xc0\x00')
self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
- rrset, RRClass.IN())
+ rrset, RRClass.IN)
def test_collect_rrsets(self):
'''
@@ -152,25 +152,25 @@ class SessionModuleTests(unittest.TestCase):
'''
collected = []
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ]))
# Same name and class, different type
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "two" ]))
# Same class and type as an existing one, different name
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.3" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.3" ]))
# Same name and type as an existing one, different class
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.4" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "two" ]))
strings = [ rrset.to_text() for rrset in collected ]
# note + vs , in this list
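The calls above exercise how collect_rrsets groups records: rdata sharing the same name, class and type are merged into one collected RRset, while a difference in any of the three starts a new one. A hypothetical stand-in of that grouping rule (collect below is illustrative, not the real isc.ddns helper):

def collect(collected, name, rrclass, rrtype, rdata):
    # Merge into an existing group when name, class and type all match...
    for entry in collected:
        if (entry['name'], entry['class'], entry['type']) == (name, rrclass, rrtype):
            entry['rdata'].append(rdata)
            return
    # ...otherwise start a new group.
    collected.append({'name': name, 'class': rrclass, 'type': rrtype,
                      'rdata': [rdata]})

groups = []
collect(groups, 'a.example.org', 'IN', 'A', '192.0.2.1')
collect(groups, 'a.example.org', 'IN', 'A', '192.0.2.2')   # merged with the first
collect(groups, 'a.example.org', 'CH', 'TXT', 'one')       # different class/type: new group
print(len(groups))   # 2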
@@ -216,7 +216,7 @@ class SessionTestBase(unittest.TestCase):
'''Perform common checks on update response message.'''
self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_QR))
# note: we convert opcode to text so it'd be more helpful on failure.
- self.assertEqual(Opcode.UPDATE().to_text(), msg.get_opcode().to_text())
+ self.assertEqual(Opcode.UPDATE.to_text(), msg.get_opcode().to_text())
self.assertEqual(expected_rcode.to_text(), msg.get_rcode().to_text())
# All sections should be cleared
self.assertEqual(0, msg.get_rr_count(SECTION_ZONE))
@@ -230,22 +230,22 @@ class TestDDNSSOA(unittest.TestCase):
'''unittest for update_soa function'''
soa_update = DDNS_SOA()
soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"])
expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. "
+ RRType.SOA, 3600, ["ns1.example.org. "
+ "admin.example.org. " +
"1234 3600 1800 2419200 7200"])
self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
expected_soa_rr.get_rdata()[0].to_text())
max_serial = 2 ** 32 - 1
soa_rdata = "%d %s"%(max_serial,"3600 1800 2419200 7200")
- soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(), 3600,
+ soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA, 3600,
["ns1.example.org. " + "admin.example.org. " +
soa_rdata])
expected_soa_rr = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. "
+ RRType.SOA, 3600, ["ns1.example.org. "
+ "admin.example.org. " +
"1 3600 1800 2419200 7200"])
self.assertEqual(soa_update.update_soa(soa_rr).get_rdata()[0].to_text(),
@@ -253,11 +253,11 @@ class TestDDNSSOA(unittest.TestCase):
def test_soa_update_check(self):
'''unittest for soa_update_check function'''
- small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"])
- large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200"])
@@ -269,11 +269,11 @@ class TestDDNSSOA(unittest.TestCase):
small_soa_rr))
small_serial = 1235 + 2 ** 31
soa_rdata = "%d %s"%(small_serial,"3600 1800 2419200 7200")
- small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ small_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
soa_rdata])
- large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA(),
+ large_soa_rr = create_rrset("example.org", TEST_RRCLASS, RRType.SOA,
3600, ["ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200"])
@@ -305,41 +305,41 @@ class SessionTest(SessionTestBase):
self.assertEqual(UPDATE_ERROR, result)
self.assertEqual(None, zname)
self.assertEqual(None, zclass)
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
# Zone section contains multiple records
msg = create_update_msg(zones=[TEST_ZONE_RECORD, TEST_ZONE_RECORD])
session = UpdateSession(msg, TEST_CLIENT4, None)
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
# Zone section's type is not SOA
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.A())])
+ RRType.A)])
session = UpdateSession(msg, TEST_CLIENT4, None)
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.FORMERR())
+ self.check_response(session.get_message(), Rcode.FORMERR)
def test_update_secondary(self):
# specified zone is configured as a secondary. Since this
# implementation doesn't support update forwarding, the result
# should be NOTIMP.
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())])
+ RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.NOTIMP())
+ self.check_response(session.get_message(), Rcode.NOTIMP)
def check_notauth(self, zname, zclass=TEST_RRCLASS):
'''Common test sequence for the 'notauth' test'''
- msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA())])
+ msg = create_update_msg(zones=[Question(zname, zclass, RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS, self._datasrc_client))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.NOTAUTH())
+ self.check_response(session.get_message(), Rcode.NOTAUTH)
def test_update_notauth(self):
'''Update attempt for non authoritative zones'''
@@ -349,7 +349,7 @@ class SessionTest(SessionTestBase):
# (match must be exact)
self.check_notauth(Name('sub.example.org'))
# zone class doesn't match
- self.check_notauth(Name('example.org'), RRClass.CH())
+ self.check_notauth(Name('example.org'), RRClass.CH)
def test_update_datasrc_error(self):
# if the data source client raises an exception, it should result in
@@ -358,17 +358,17 @@ class SessionTest(SessionTestBase):
def find_zone(self, name):
raise isc.datasrc.Error('faked exception')
msg = create_update_msg(zones=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
- RRType.SOA())])
+ RRType.SOA)])
session = UpdateSession(msg, TEST_CLIENT4,
ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
TEST_RRCLASS,
BadDataSourceClient()))
self.assertEqual(UPDATE_ERROR, session.handle()[0])
- self.check_response(session.get_message(), Rcode.SERVFAIL())
+ self.check_response(session.get_message(), Rcode.SERVFAIL)
def test_foreach_rr_in_rrset(self):
rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
l = []
for rr in foreach_rr(rrset):
@@ -390,17 +390,17 @@ class SessionTest(SessionTestBase):
def test_convert_rrset_class(self):
# Converting an RRSET to a different class should work
# if the rdata types can be converted
- rrset = create_rrset("www.example.org", RRClass.NONE(), RRType.A(),
+ rrset = create_rrset("www.example.org", RRClass.NONE, RRType.A,
3600, [ b'\xc0\x00\x02\x01', b'\xc0\x00\x02\x02'])
- rrset2 = convert_rrset_class(rrset, RRClass.IN())
+ rrset2 = convert_rrset_class(rrset, RRClass.IN)
self.assertEqual("www.example.org. 3600 IN A 192.0.2.1\n" +
"www.example.org. 3600 IN A 192.0.2.2\n",
str(rrset2))
- rrset3 = convert_rrset_class(rrset2, RRClass.NONE())
- self.assertEqual("www.example.org. 3600 CLASS254 A \\# 4 " +
- "c0000201\nwww.example.org. 3600 CLASS254 " +
+ rrset3 = convert_rrset_class(rrset2, RRClass.NONE)
+ self.assertEqual("www.example.org. 3600 NONE A \\# 4 " +
+ "c0000201\nwww.example.org. 3600 NONE " +
"A \\# 4 c0000202\n",
str(rrset3))
@@ -409,10 +409,10 @@ class SessionTest(SessionTestBase):
# there was a ticket about making a better hierarchy for
# dns/parsing related exceptions)
self.assertRaises(InvalidRdataLength, convert_rrset_class,
- rrset, RRClass.CH())
+ rrset, RRClass.CH)
add_rdata(rrset, b'\xc0\x00')
self.assertRaises(DNSMessageFORMERR, convert_rrset_class,
- rrset, RRClass.IN())
+ rrset, RRClass.IN)
def test_collect_rrsets(self):
'''
@@ -421,25 +421,25 @@ class SessionTest(SessionTestBase):
'''
collected = []
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ]))
# Same name and class, different type
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.IN,
+ RRType.TXT, 0, [ "two" ]))
# Same class and type as an existing one, different name
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.3" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.3" ]))
# Same name and type as an existing one, different class
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "one" ]))
- collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.4" ]))
- collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH(),
- RRType.TXT(), 0, [ "two" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "one" ]))
+ collect_rrsets(collected, create_rrset("b.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.4" ]))
+ collect_rrsets(collected, create_rrset("a.example.org", RRClass.CH,
+ RRType.TXT, 0, [ "two" ]))
strings = [ rrset.to_text() for rrset in collected ]
# note + vs , in this list
@@ -469,64 +469,64 @@ class SessionTest(SessionTestBase):
'''
# Basic existence checks
# www.example.org should have an A, but not an MX
- rrset = create_rrset("www.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("www.example.org", rrclass, RRType.MX(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.MX, 0)
self.__prereq_helper(method, not expected, rrset)
# example.org should have an MX, but not an A
- rrset = create_rrset("example.org", rrclass, RRType.MX(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.MX, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# Also check the case where the name does not even exist
- rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("doesnotexist.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# Wildcard expansion should not be applied, but literal matches
# should work
- rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("foo.wildcard.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("*.wildcard.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
# Likewise, CNAME directly should match, but what it points to should
# not
- rrset = create_rrset("cname.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("cname.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
- rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME(), 0)
+ rrset = create_rrset("cname.example.org", rrclass, RRType.CNAME, 0)
self.__prereq_helper(method, expected, rrset)
# And also make sure a delegation (itself) is not treated as existing
# data
- rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("foo.sub.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, not expected, rrset)
# But the delegation data itself should match
- rrset = create_rrset("sub.example.org", rrclass, RRType.NS(), 0)
+ rrset = create_rrset("sub.example.org", rrclass, RRType.NS, 0)
self.__prereq_helper(method, expected, rrset)
# As should glue
- rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A(), 0)
+ rrset = create_rrset("ns.sub.example.org", rrclass, RRType.A, 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_exists(self):
method = self._session._UpdateSession__prereq_rrset_exists
self.__check_prerequisite_exists_combined(method,
- RRClass.ANY(),
+ RRClass.ANY,
True)
def test_check_prerequisite_does_not_exist(self):
method = self._session._UpdateSession__prereq_rrset_does_not_exist
self.__check_prerequisite_exists_combined(method,
- RRClass.NONE(),
+ RRClass.NONE,
False)
def test_check_prerequisite_exists_value(self):
method = self._session._UpdateSession__prereq_rrset_exists_value
- rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 0)
+ rrset = create_rrset("www.example.org", RRClass.IN, RRType.A, 0)
# empty one should not match
self.__prereq_helper(method, False, rrset)
@@ -539,11 +539,11 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# Also test one with more than one RR
- rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ rrset = create_rrset("example.org", RRClass.IN, RRType.NS, 0)
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns1.example.org.")
self.__prereq_helper(method, False, rrset)
- add_rdata(rrset, "ns2.example.org")
+ add_rdata(rrset, "ns2.example.org.")
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, True, rrset)
@@ -551,7 +551,7 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# Repeat that, but try a different order of Rdata addition
- rrset = create_rrset("example.org", RRClass.IN(), RRType.NS(), 0)
+ rrset = create_rrset("example.org", RRClass.IN, RRType.NS, 0)
self.__prereq_helper(method, False, rrset)
add_rdata(rrset, "ns3.example.org.")
self.__prereq_helper(method, False, rrset)
@@ -563,8 +563,8 @@ class SessionTest(SessionTestBase):
self.__prereq_helper(method, False, rrset)
# and test one where the name does not even exist
- rrset = create_rrset("doesnotexist.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ])
+ rrset = create_rrset("doesnotexist.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ])
self.__prereq_helper(method, False, rrset)
def __check_prerequisite_name_in_use_combined(self, method, rrclass,
@@ -573,42 +573,42 @@ class SessionTest(SessionTestBase):
in behaviour) methods __prereq_name_in_use and
__prereq_name_not_in_use
'''
- rrset = create_rrset("example.org", rrclass, RRType.ANY(), 0)
+ rrset = create_rrset("example.org", rrclass, RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
- rrset = create_rrset("www.example.org", rrclass, RRType.ANY(), 0)
+ rrset = create_rrset("www.example.org", rrclass, RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
rrset = create_rrset("doesnotexist.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("belowdelegation.sub.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("foo.wildcard.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
# empty nonterminal should not match
rrset = create_rrset("nonterminal.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, not expected, rrset)
rrset = create_rrset("empty.nonterminal.example.org", rrclass,
- RRType.ANY(), 0)
+ RRType.ANY, 0)
self.__prereq_helper(method, expected, rrset)
def test_check_prerequisite_name_in_use(self):
method = self._session._UpdateSession__prereq_name_in_use
self.__check_prerequisite_name_in_use_combined(method,
- RRClass.ANY(),
+ RRClass.ANY,
True)
def test_check_prerequisite_name_not_in_use(self):
method = self._session._UpdateSession__prereq_name_not_in_use
self.__check_prerequisite_name_in_use_combined(method,
- RRClass.NONE(),
+ RRClass.NONE,
False)
def check_prerequisite_result(self, expected, prerequisites):
@@ -632,7 +632,7 @@ class SessionTest(SessionTestBase):
self.assertEqual(expected.to_text(),
session._UpdateSession__message.get_rcode().to_text())
# And that the result looks right
- if expected == Rcode.NOERROR():
+ if expected == Rcode.NOERROR:
self.assertEqual(UPDATE_SUCCESS, result)
else:
self.assertEqual(UPDATE_ERROR, result)
@@ -672,7 +672,7 @@ class SessionTest(SessionTestBase):
self.assertEqual(expected.to_text(),
session._UpdateSession__message.get_rcode().to_text())
# And that the result looks right
- if expected == Rcode.NOERROR():
+ if expected == Rcode.NOERROR:
self.assertEqual(UPDATE_SUCCESS, result)
else:
self.assertEqual(UPDATE_ERROR, result)
@@ -685,78 +685,75 @@ class SessionTest(SessionTestBase):
# in the specific prerequisite type tests)
# Let's first define a number of prereqs that should succeed
- rrset_exists_yes = create_rrset("example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_exists_yes = create_rrset("example.org", RRClass.ANY,
+ RRType.SOA, 0)
- rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.1" ])
+ rrset_exists_value_yes = create_rrset("www.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.1" ])
rrset_does_not_exist_yes = create_rrset("foo.example.org",
- RRClass.NONE(), RRType.SOA(),
+ RRClass.NONE, RRType.SOA,
0)
- name_in_use_yes = create_rrset("www.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_yes = create_rrset("www.example.org", RRClass.ANY,
+ RRType.ANY, 0)
- name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE(),
- RRType.ANY(), 0)
+ name_not_in_use_yes = create_rrset("foo.example.org", RRClass.NONE,
+ RRType.ANY, 0)
- rrset_exists_value_1 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns1.example.org" ])
- rrset_exists_value_2 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns2.example.org" ])
- rrset_exists_value_3 = create_rrset("example.org", RRClass.IN(),
- RRType.NS(), 0,
- [ "ns3.example.org" ])
+ rrset_exists_value_1 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns1.example.org."])
+ rrset_exists_value_2 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns2.example.org."])
+ rrset_exists_value_3 = create_rrset("example.org", RRClass.IN,
+ RRType.NS, 0, ["ns3.example.org."])
# and a number that should not
- rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_exists_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.SOA, 0)
- rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN(),
- RRType.A(), 0, [ "192.0.2.2" ])
+ rrset_exists_value_no = create_rrset("www.example.org", RRClass.IN,
+ RRType.A, 0, [ "192.0.2.2" ])
- rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE(),
- RRType.SOA(), 0)
+ rrset_does_not_exist_no = create_rrset("example.org", RRClass.NONE,
+ RRType.SOA, 0)
- name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.ANY, 0)
- name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE(),
- RRType.ANY(), 0)
+ name_not_in_use_no = create_rrset("www.example.org", RRClass.NONE,
+ RRType.ANY, 0)
# check 'no' result codes
- self.check_prerequisite_result(Rcode.NXRRSET(),
+ self.check_prerequisite_result(Rcode.NXRRSET,
[ rrset_exists_no ])
- self.check_prerequisite_result(Rcode.NXRRSET(),
+ self.check_prerequisite_result(Rcode.NXRRSET,
[ rrset_exists_value_no ])
- self.check_prerequisite_result(Rcode.YXRRSET(),
+ self.check_prerequisite_result(Rcode.YXRRSET,
[ rrset_does_not_exist_no ])
- self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ self.check_prerequisite_result(Rcode.NXDOMAIN,
[ name_in_use_no ])
- self.check_prerequisite_result(Rcode.YXDOMAIN(),
+ self.check_prerequisite_result(Rcode.YXDOMAIN,
[ name_not_in_use_no ])
# the 'yes' codes should result in ok
# individually
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_yes ] )
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_does_not_exist_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ name_in_use_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ name_not_in_use_yes ])
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_1,
rrset_exists_value_2,
rrset_exists_value_3])
# and together
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_yes,
rrset_exists_value_yes,
rrset_does_not_exist_yes,
@@ -768,7 +765,7 @@ class SessionTest(SessionTestBase):
# try out a permutation, note that one rrset is split up,
# and the order of the RRs should not matter
- self.check_prerequisite_result(Rcode.NOERROR(),
+ self.check_prerequisite_result(Rcode.NOERROR,
[ rrset_exists_value_3,
rrset_exists_yes,
rrset_exists_value_2,
@@ -777,7 +774,7 @@ class SessionTest(SessionTestBase):
# Should fail on the first error, even if most of the
# prerequisites are ok
- self.check_prerequisite_result(Rcode.NXDOMAIN(),
+ self.check_prerequisite_result(Rcode.NXDOMAIN,
[ rrset_exists_value_3,
rrset_exists_yes,
rrset_exists_value_2,
@@ -786,39 +783,39 @@ class SessionTest(SessionTestBase):
rrset_exists_value_1])
def test_prerequisite_notzone(self):
- rrset = create_rrset("some.other.zone.", RRClass.ANY(), RRType.SOA(), 0)
- self.check_prerequisite_result(Rcode.NOTZONE(), [ rrset ])
+ rrset = create_rrset("some.other.zone.", RRClass.ANY, RRType.SOA, 0)
+ self.check_prerequisite_result(Rcode.NOTZONE, [ rrset ])
def test_prerequisites_formerr(self):
# test for form errors in the prerequisite section
# Class ANY, non-zero TTL
- rrset = create_rrset("example.org", RRClass.ANY(), RRType.SOA(), 1)
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset("example.org", RRClass.ANY, RRType.SOA, 1)
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class ANY, but with rdata
- rrset = create_rrset("example.org", RRClass.ANY(), RRType.A(), 0,
+ rrset = create_rrset("example.org", RRClass.ANY, RRType.A, 0,
[ b'\x00\x00\x00\x00' ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class NONE, non-zero TTL
- rrset = create_rrset("example.org", RRClass.NONE(), RRType.SOA(), 1)
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset("example.org", RRClass.NONE, RRType.SOA, 1)
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Class NONE, but with rdata
- rrset = create_rrset("example.org", RRClass.NONE(), RRType.A(), 0,
+ rrset = create_rrset("example.org", RRClass.NONE, RRType.A, 0,
[ b'\x00\x00\x00\x00' ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Matching class and type, but non-zero TTL
- rrset = create_rrset("www.example.org", RRClass.IN(), RRType.A(), 1,
+ rrset = create_rrset("www.example.org", RRClass.IN, RRType.A, 1,
[ "192.0.2.1" ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
# Completely different class
- rrset = create_rrset("example.org", RRClass.CH(), RRType.TXT(), 0,
+ rrset = create_rrset("example.org", RRClass.CH, RRType.TXT, 0,
[ "foo" ])
- self.check_prerequisite_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prerequisite_result(Rcode.FORMERR, [ rrset ])
def __prereq_helper(self, method, expected, rrset):
'''Calls the given method with self._datasrc_client
@@ -830,84 +827,84 @@ class SessionTest(SessionTestBase):
'''Prepare a number of RRsets to be used in several update tests
The rrsets are stored in self'''
orig_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600, [ "192.0.2.1" ])
+ RRType.A, 3600, [ "192.0.2.1" ])
self.orig_a_rrset = orig_a_rrset
rrset_update_a = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.2", "192.0.2.3" ])
self.rrset_update_a = rrset_update_a
rrset_update_soa = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200" ])
self.rrset_update_soa = rrset_update_soa
- rrset_update_soa_del = create_rrset("example.org", RRClass.NONE(),
- RRType.SOA(), 0,
+ rrset_update_soa_del = create_rrset("example.org", RRClass.NONE,
+ RRType.SOA, 0,
[ "ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200" ])
self.rrset_update_soa_del = rrset_update_soa_del
rrset_update_soa2 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"4000 3600 1800 2419200 7200" ])
self.rrset_update_soa2 = rrset_update_soa2
- rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ rrset_update_del_name = create_rrset("www.example.org", RRClass.ANY,
+ RRType.ANY, 0)
self.rrset_update_del_name = rrset_update_del_name
- rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ rrset_update_del_name_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.ANY, 0)
self.rrset_update_del_name_apex = rrset_update_del_name_apex
- rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY(),
- RRType.A(), 0)
+ rrset_update_del_rrset = create_rrset("www.example.org", RRClass.ANY,
+ RRType.A, 0)
self.rrset_update_del_rrset = rrset_update_del_rrset
- rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.MX(), 0)
+ rrset_update_del_mx_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.MX, 0)
self.rrset_update_del_mx_apex = rrset_update_del_mx_apex
- rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.SOA(), 0)
+ rrset_update_del_soa_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.SOA, 0)
self.rrset_update_del_soa_apex = rrset_update_del_soa_apex
- rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY(),
- RRType.NS(), 0)
+ rrset_update_del_ns_apex = create_rrset("example.org", RRClass.ANY,
+ RRType.NS, 0)
self.rrset_update_del_ns_apex = rrset_update_del_ns_apex
rrset_update_del_rrset_part = create_rrset("www.example.org",
- RRClass.NONE(), RRType.A(),
+ RRClass.NONE, RRType.A,
0,
[ b'\xc0\x00\x02\x02',
b'\xc0\x00\x02\x03' ])
self.rrset_update_del_rrset_part = rrset_update_del_rrset_part
- rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE(),
- RRType.NS(), 0,
+ rrset_update_del_rrset_ns = create_rrset("example.org", RRClass.NONE,
+ RRType.NS, 0,
[ b'\x03ns1\x07example\x03org\x00',
b'\x03ns2\x07example\x03org\x00',
b'\x03ns3\x07example\x03org\x00' ])
self.rrset_update_del_rrset_ns = rrset_update_del_rrset_ns
- rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE(),
- RRType.MX(), 0,
+ rrset_update_del_rrset_mx = create_rrset("example.org", RRClass.NONE,
+ RRType.MX, 0,
[ b'\x00\x0a\x04mail\x07example\x03org\x00' ])
self.rrset_update_del_rrset_mx = rrset_update_del_rrset_mx
def test_acl_before_prereq(self):
- name_in_use_no = create_rrset("foo.example.org", RRClass.ANY(),
- RRType.ANY(), 0)
+ name_in_use_no = create_rrset("foo.example.org", RRClass.ANY,
+ RRType.ANY, 0)
# Test a prerequisite that would fail
- self.check_full_handle_result(Rcode.NXDOMAIN(), [], [ name_in_use_no ])
+ self.check_full_handle_result(Rcode.NXDOMAIN, [], [ name_in_use_no ])
# Change ACL so that it would be denied
self._acl_map = {(TEST_ZONE_NAME, TEST_RRCLASS):
@@ -915,7 +912,7 @@ class SessionTest(SessionTestBase):
# The prerequisite should now not be reached; it should fail on the
# ACL
- self.check_full_handle_result(Rcode.REFUSED(), [], [ name_in_use_no ])
+ self.check_full_handle_result(Rcode.REFUSED, [], [ name_in_use_no ])
def test_prescan(self):
'''Test whether the prescan succeeds on data that is ok, and whether
@@ -923,29 +920,29 @@ class SessionTest(SessionTestBase):
# prepare a set of correct update statements
self.__initialize_update_rrsets()
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_a ])
# check if soa is noticed
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa ],
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_soa ],
self.rrset_update_soa)
# Other types of successful prechecks
- self.check_prescan_result(Rcode.NOERROR(), [ self.rrset_update_soa2 ],
+ self.check_prescan_result(Rcode.NOERROR, [ self.rrset_update_soa2 ],
self.rrset_update_soa2)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_name_apex ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_mx_apex ])
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
# and check a few permutations of the above
# all of them (with one of the soas)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[
self.rrset_update_a,
self.rrset_update_soa,
@@ -960,16 +957,16 @@ class SessionTest(SessionTestBase):
# Two SOAs. Should we reject or simply use the last?
# (the RFC is not really explicit on this, but reading between the lines
# suggests using the last)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_soa,
self.rrset_update_soa2 ],
self.rrset_update_soa2)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[ self.rrset_update_soa2,
self.rrset_update_soa ],
self.rrset_update_soa)
- self.check_prescan_result(Rcode.NOERROR(),
+ self.check_prescan_result(Rcode.NOERROR,
[
self.rrset_update_del_mx_apex,
self.rrset_update_del_name,
@@ -984,36 +981,36 @@ class SessionTest(SessionTestBase):
def test_prescan_failures(self):
'''Test whether prescan fails on bad data'''
# out of zone data
- rrset = create_rrset("different.zone", RRClass.ANY(), RRType.TXT(), 0)
- self.check_prescan_result(Rcode.NOTZONE(), [ rrset ])
+ rrset = create_rrset("different.zone", RRClass.ANY, RRType.TXT, 0)
+ self.check_prescan_result(Rcode.NOTZONE, [ rrset ])
# forbidden type, zone class
- rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.ANY, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero TTL, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 1)
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.TXT, 1)
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero Rdata, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.TXT(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.TXT, 0,
[ "foo" ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# forbidden type, class ANY
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY(), RRType.AXFR(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.ANY, RRType.AXFR, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# non-zero TTL, class NONE
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.TXT(), 1)
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE, RRType.TXT, 1)
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
# forbidden type, class NONE
- rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE(), RRType.AXFR(), 0,
+ rrset = create_rrset(TEST_ZONE_NAME, RRClass.NONE, RRType.AXFR, 0,
[ b'\x00' ])
- self.check_prescan_result(Rcode.FORMERR(), [ rrset ])
+ self.check_prescan_result(Rcode.FORMERR, [ rrset ])
def __check_inzone_data(self, expected_result, name, rrtype,
expected_rrset = None):
@@ -1054,7 +1051,7 @@ class SessionTest(SessionTestBase):
# during this test, we will extend it at some point
extended_a_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.1",
"192.0.2.2",
"192.0.2.3" ])
@@ -1062,90 +1059,90 @@ class SessionTest(SessionTestBase):
# Sanity check, make sure original data is really there before updates
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# Add two rrs
- self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_full_handle_result(Rcode.NOERROR, [ self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
extended_a_rrset)
# Adding the same RRsets should not make a difference.
- self.check_full_handle_result(Rcode.NOERROR(), [ self.rrset_update_a ])
+ self.check_full_handle_result(Rcode.NOERROR, [ self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
extended_a_rrset)
# Now delete those two, and we should end up with the original RRset
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# 'Deleting' them again should make no difference
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_part ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
# But deleting the entire rrset, independent of its contents, should
# work
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Check that if we update the SOA, it is updated to our value
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_soa2 ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
self.rrset_update_soa2)
def test_glue_deletions(self):
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("ns.sub.example.org."),
- RRType.A())
+ RRType.A)
# See that we can delete glue
rrset_delete_glue = create_rrset("ns.sub.example.org.",
- RRClass.ANY(),
- RRType.A(),
+ RRClass.ANY,
+ RRType.A,
0)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset_delete_glue ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("ns.sub.example.org."),
- RRType.A())
+ RRType.A)
# Check that we don't accidentally delete a delegation if we
# try to delete non-existent glue
rrset_delete_nonexistent_glue = create_rrset("foo.sub.example.org.",
- RRClass.ANY(),
- RRType.A(),
+ RRClass.ANY,
+ RRType.A,
0)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset_delete_nonexistent_glue ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("sub.example.org."),
- RRType.NS())
+ RRType.NS)
def test_update_add_new_data(self):
'''
@@ -1154,26 +1151,26 @@ class SessionTest(SessionTestBase):
# Add data at a completely new name
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new.example.org"),
- RRType.A())
- rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A(),
+ RRType.A)
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1", "192.0.2.2" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new.example.org"),
- RRType.A(),
+ RRType.A,
rrset)
# Also try a name where data is present, but none of this
# specific type
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("new.example.org"),
- RRType.TXT())
- rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT(),
+ RRType.TXT)
+ rrset = create_rrset("new.example.org", TEST_RRCLASS, RRType.TXT,
3600, [ "foo" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new.example.org"),
- RRType.TXT(),
+ RRType.TXT,
rrset)
def test_update_add_new_data_interspersed(self):
@@ -1186,36 +1183,36 @@ class SessionTest(SessionTestBase):
# Add data at a completely new name
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new_a.example.org"),
- RRType.A())
+ RRType.A)
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("new_txt.example.org"),
- RRType.TXT())
+ RRType.TXT)
- rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ rrset1 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1" ])
- rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT(),
+ rrset2 = create_rrset("new_txt.example.org", TEST_RRCLASS, RRType.TXT,
3600, [ "foo" ])
- rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A(),
+ rrset3 = create_rrset("new_a.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.2" ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ rrset1, rrset2, rrset3 ])
# The update should have merged rrset1 and rrset3
rrset_merged = create_rrset("new_a.example.org", TEST_RRCLASS,
- RRType.A(), 3600,
+ RRType.A, 3600,
[ "192.0.2.1", "192.0.2.2" ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new_a.example.org"),
- RRType.A(),
+ RRType.A,
rrset_merged)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("new_txt.example.org"),
- RRType.TXT(),
+ RRType.TXT,
rrset2)
def test_update_delete_name(self):
@@ -1227,21 +1224,21 @@ class SessionTest(SessionTestBase):
# First check it is there
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Delete the entire name
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
# Should still be gone after pointless second delete
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_apex_special_cases(self):
'''
@@ -1251,23 +1248,23 @@ class SessionTest(SessionTestBase):
# the original SOA
orig_soa_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600,
+ RRType.SOA, 3600,
[ "ns1.example.org. " +
"admin.example.org. " +
"1234 3600 1800 2419200 7200" ])
# At some point, the SOA SERIAL will be auto-incremented
incremented_soa_rrset_01 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1235 3600 1800 2419200 7200" ])
incremented_soa_rrset_02 = create_rrset("example.org", TEST_RRCLASS,
- RRType.SOA(), 3600, ["ns1.example.org. " +
+ RRType.SOA, 3600, ["ns1.example.org. " +
"admin.example.org. " +
"1236 3600 1800 2419200 7200" ])
# We will delete some of the NS records
orig_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.NS(), 3600,
+ RRType.NS, 3600,
[ "ns1.example.org.",
"ns2.example.org.",
"ns3.example.org." ])
@@ -1275,48 +1272,48 @@ class SessionTest(SessionTestBase):
# Sanity check, make sure original data is really there before updates
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
# We will delete the MX record later in this test, so let's make
# sure that it exists (we do not care about its value)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
# Check that we cannot delete the SOA record by direct deletion
# both by name+type and by full rrset
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_soa_apex,
self.rrset_update_soa_del ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
incremented_soa_rrset_01)
# If we delete everything at the apex, the SOA and NS rrsets should be
# untouched (but serial will be incremented)
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name_apex ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.SOA(),
+ RRType.SOA,
incremented_soa_rrset_02)
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
# but the MX should be gone
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
# Deleting the NS rrset by name and type only, it should also be left
# untouched
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_ns_apex ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
orig_ns_rrset)
def test_update_apex_special_case_ns_rrset(self):
@@ -1325,28 +1322,28 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
# When we are done, we should have a reduced NS rrset
short_ns_rrset = create_rrset("example.org", TEST_RRCLASS,
- RRType.NS(), 3600,
+ RRType.NS, 3600,
[ "ns3.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_ns ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
short_ns_rrset)
def test_update_apex_special_case_ns_rrset2(self):
# If we add new NS records, then delete all existing ones, it
# should not keep any
self.__initialize_update_rrsets()
- new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS(), 3600,
- [ "newns1.example.org", "newns2.example.org" ])
+ new_ns = create_rrset("example.org", TEST_RRCLASS, RRType.NS, 3600,
+ [ "newns1.example.org.", "newns2.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(),
+ self.check_full_handle_result(Rcode.NOERROR,
[ new_ns,
self.rrset_update_del_rrset_ns ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.NS(),
+ RRType.NS,
new_ns)
def test_update_delete_normal_rrset_at_apex(self):
@@ -1358,12 +1355,12 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("example.org"),
- RRType.MX())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.MX)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset_mx ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXRRSET,
isc.dns.Name("example.org"),
- RRType.MX())
+ RRType.MX)
def test_update_add_then_delete_rrset(self):
# If we add data, then delete the whole rrset, added data should
@@ -1371,13 +1368,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_a,
self.rrset_update_del_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_add_then_delete_name(self):
# If we add data, then delete the entire name, added data should
@@ -1385,13 +1382,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_a,
self.rrset_update_del_name ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.NXDOMAIN,
isc.dns.Name("www.example.org"),
- RRType.A())
+ RRType.A)
def test_update_delete_then_add_rrset(self):
# If we delete an entire rrset, then add something there again,
@@ -1399,13 +1396,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_rrset,
self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.rrset_update_a)
def test_update_delete_then_add_rrset(self):
@@ -1414,13 +1411,13 @@ class SessionTest(SessionTestBase):
self.__initialize_update_rrsets()
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A())
- self.check_full_handle_result(Rcode.NOERROR(),
+ RRType.A)
+ self.check_full_handle_result(Rcode.NOERROR,
[ self.rrset_update_del_name,
self.rrset_update_a ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.rrset_update_a)
def test_update_cname_special_cases(self):
@@ -1428,31 +1425,31 @@ class SessionTest(SessionTestBase):
# Sanity check
orig_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "www.example.org." ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
orig_cname_rrset)
# If we try to add data where a cname is present
- rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A(),
+ rrset = create_rrset("cname.example.org", TEST_RRCLASS, RRType.A,
3600, [ "192.0.2.1" ])
- self.check_full_handle_result(Rcode.NOERROR(), [ rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
orig_cname_rrset)
# But updating the cname itself should work
new_cname_rrset = create_rrset("cname.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "mail.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ new_cname_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.CNAME,
isc.dns.Name("cname.example.org"),
- RRType.A(),
+ RRType.A,
new_cname_rrset)
self.__initialize_update_rrsets()
@@ -1461,27 +1458,27 @@ class SessionTest(SessionTestBase):
# present should do nothing either
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
new_cname_rrset = create_rrset("www.example.org", TEST_RRCLASS,
- RRType.CNAME(), 3600,
+ RRType.CNAME, 3600,
[ "mail.example.org." ])
- self.check_full_handle_result(Rcode.NOERROR(), [ new_cname_rrset ])
+ self.check_full_handle_result(Rcode.NOERROR, [ new_cname_rrset ])
self.__check_inzone_data(isc.datasrc.ZoneFinder.SUCCESS,
isc.dns.Name("www.example.org"),
- RRType.A(),
+ RRType.A,
self.orig_a_rrset)
def test_update_bad_class(self):
- rrset = create_rrset("example.org.", RRClass.CH(), RRType.TXT(), 0,
+ rrset = create_rrset("example.org.", RRClass.CH, RRType.TXT, 0,
[ "foo" ])
- self.check_full_handle_result(Rcode.FORMERR(), [ rrset ])
+ self.check_full_handle_result(Rcode.FORMERR, [ rrset ])
def test_uncaught_exception(self):
def my_exc():
raise Exception("foo")
self._session._UpdateSession__update_soa = my_exc
- self.assertEqual(Rcode.SERVFAIL().to_text(),
+ self.assertEqual(Rcode.SERVFAIL.to_text(),
self._session._UpdateSession__do_update().to_text())
class SessionACLTest(SessionTestBase):
@@ -1527,7 +1524,7 @@ class SessionACLTest(SessionTestBase):
self._datasrc_client,
acl_map))
self.assertEqual((UPDATE_ERROR, None, None), session.handle())
- self.check_response(session.get_message(), Rcode.REFUSED())
+ self.check_response(session.get_message(), Rcode.REFUSED)
# If the message contains TSIG, it should match the ACCEPT
# ACL entry, and the request should be granted.
diff --git a/src/lib/python/isc/ddns/tests/zone_config_tests.py b/src/lib/python/isc/ddns/tests/zone_config_tests.py
index 7facb48..0ada906 100644
--- a/src/lib/python/isc/ddns/tests/zone_config_tests.py
+++ b/src/lib/python/isc/ddns/tests/zone_config_tests.py
@@ -26,7 +26,7 @@ import socket
# Some common test parameters
TEST_ZONE_NAME = Name('example.org')
TEST_SECONDARY_ZONE_NAME = Name('example.com')
-TEST_RRCLASS = RRClass.IN()
+TEST_RRCLASS = RRClass.IN
TEST_TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
TEST_ACL_CONTEXT = isc.acl.dns.RequestContext(
socket.getaddrinfo("192.0.2.1", 1234, 0, socket.SOCK_DGRAM,
@@ -88,12 +88,12 @@ class ZoneConfigTest(unittest.TestCase):
# zone class doesn't match (but zone name matches)
self.__datasrc_client.set_find_result(DataSourceClient.SUCCESS)
zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS)},
- RRClass.CH(), self.__datasrc_client)
+ RRClass.CH, self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
# similar to the previous case, but also in the secondary list
zconfig = ZoneConfig({(TEST_ZONE_NAME, TEST_RRCLASS)},
- RRClass.CH(), self.__datasrc_client)
+ RRClass.CH, self.__datasrc_client)
self.assertEqual((ZONE_NOTFOUND, None),
(zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS)))
@@ -107,7 +107,7 @@ class ZoneConfigTest(unittest.TestCase):
zconfig = ZoneConfig({(TEST_SECONDARY_ZONE_NAME, TEST_RRCLASS),
(Name('example'), TEST_RRCLASS),
(Name('sub.example.org'), TEST_RRCLASS),
- (TEST_ZONE_NAME, RRClass.CH())},
+ (TEST_ZONE_NAME, RRClass.CH)},
TEST_RRCLASS, self.__datasrc_client)
self.assertEqual((ZONE_PRIMARY, self.__datasrc_client),
self.zconfig.find_zone(TEST_ZONE_NAME, TEST_RRCLASS))
@@ -134,7 +134,7 @@ class ACLConfigTest(unittest.TestCase):
# 'All reject' ACL will still apply for any other zones
acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
- acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
# Test with a map with a few more ACL entries. Should be nothing
@@ -143,14 +143,14 @@ class ACLConfigTest(unittest.TestCase):
REQUEST_LOADER.load([{"action": "REJECT"}]),
(TEST_ZONE_NAME, TEST_RRCLASS):
REQUEST_LOADER.load([{"action": "ACCEPT"}]),
- (TEST_ZONE_NAME, RRClass.CH()):
+ (TEST_ZONE_NAME, RRClass.CH):
REQUEST_LOADER.load([{"action": "DROP"}])}
self.__zconfig.set_update_acl_map(acl_map)
acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, TEST_RRCLASS)
self.assertEqual(ACCEPT, acl.execute(TEST_ACL_CONTEXT))
acl = self.__zconfig.get_update_acl(Name('example.com'), TEST_RRCLASS)
self.assertEqual(REJECT, acl.execute(TEST_ACL_CONTEXT))
- acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH())
+ acl = self.__zconfig.get_update_acl(TEST_ZONE_NAME, RRClass.CH)
self.assertEqual(DROP, acl.execute(TEST_ACL_CONTEXT))
if __name__ == "__main__":
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
index 97ff6e6..c8b9c7a 100644
--- a/src/lib/python/isc/log_messages/Makefile.am
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -1,7 +1,7 @@
SUBDIRS = work
EXTRA_DIST = __init__.py
-EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += init_messages.py
EXTRA_DIST += cmdctl_messages.py
EXTRA_DIST += ddns_messages.py
EXTRA_DIST += stats_messages.py
@@ -18,9 +18,10 @@ EXTRA_DIST += loadzone_messages.py
EXTRA_DIST += server_common_messages.py
EXTRA_DIST += dbutil_messages.py
EXTRA_DIST += msgq_messages.py
+EXTRA_DIST += pycc_messages.py
CLEANFILES = __init__.pyc
-CLEANFILES += bind10_messages.pyc
+CLEANFILES += init_messages.pyc
CLEANFILES += cmdctl_messages.pyc
CLEANFILES += ddns_messages.pyc
CLEANFILES += stats_messages.pyc
@@ -37,6 +38,7 @@ CLEANFILES += loadzone_messages.pyc
CLEANFILES += server_common_messages.pyc
CLEANFILES += dbutil_messages.pyc
CLEANFILES += msgq_messages.pyc
+CLEANFILES += pycc_messages.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
deleted file mode 100644
index 68ce94c..0000000
--- a/src/lib/python/isc/log_messages/bind10_messages.py
+++ /dev/null
@@ -1 +0,0 @@
-from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/init_messages.py b/src/lib/python/isc/log_messages/init_messages.py
new file mode 100644
index 0000000..15288bf
--- /dev/null
+++ b/src/lib/python/isc/log_messages/init_messages.py
@@ -0,0 +1 @@
+from work.init_messages import *
diff --git a/src/lib/python/isc/log_messages/pycc_messages.py b/src/lib/python/isc/log_messages/pycc_messages.py
new file mode 100644
index 0000000..77b3804
--- /dev/null
+++ b/src/lib/python/isc/log_messages/pycc_messages.py
@@ -0,0 +1 @@
+from work.pycc_messages import *
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 46bb00b..1f75256 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -302,12 +302,12 @@ class NotifyOut:
format_zone_str(zone_name, zone_class))
return []
- result, ns_rrset, _ = finder.find(zone_name, RRType.NS())
+ result, ns_rrset, _ = finder.find(zone_name, RRType.NS)
if result is not finder.SUCCESS or ns_rrset is None:
logger.warn(NOTIFY_OUT_ZONE_NO_NS,
format_zone_str(zone_name, zone_class))
return []
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result is not finder.SUCCESS or soa_rrset is None or \
soa_rrset.get_rdata_count() != 1:
logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
@@ -323,11 +323,11 @@ class NotifyOut:
ns_result, ns_finder = ds_client.find_zone(ns_name)
if ns_result is DataSourceClient.SUCCESS or \
ns_result is DataSourceClient.PARTIALMATCH:
- result, rrset, _ = ns_finder.find(ns_name, RRType.A())
+ result, rrset, _ = ns_finder.find(ns_name, RRType.A)
if result is ns_finder.SUCCESS and rrset is not None:
addrs.extend([a.to_text() for a in rrset.get_rdata()])
- result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA())
+ result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA)
if result is ns_finder.SUCCESS and rrset is not None:
addrs.extend([aaaa.to_text()
for aaaa in rrset.get_rdata()])
@@ -509,10 +509,10 @@ class NotifyOut:
msg = Message(Message.RENDER)
qid = random.randint(0, 0xFFFF)
msg.set_qid(qid)
- msg.set_opcode(Opcode.NOTIFY())
- msg.set_rcode(Rcode.NOERROR())
+ msg.set_opcode(Opcode.NOTIFY)
+ msg.set_rcode(Rcode.NOERROR)
msg.set_header_flag(Message.HEADERFLAG_AA)
- msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+ msg.add_question(Question(zone_name, zone_class, RRType.SOA))
msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
zone_class))
return msg, qid
@@ -531,7 +531,7 @@ class NotifyOut:
zone_name.to_text() + '/' +
zone_class.to_text() + ' not found')
- result, soa_rrset, _ = finder.find(zone_name, RRType.SOA())
+ result, soa_rrset, _ = finder.find(zone_name, RRType.SOA)
if result is not finder.SUCCESS or soa_rrset is None or \
soa_rrset.get_rdata_count() != 1:
raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
@@ -566,7 +566,7 @@ class NotifyOut:
Name(zone_notify_info.zone_name).to_text())
return _BAD_QUERY_NAME
- if msg.get_opcode() != Opcode.NOTIFY():
+ if msg.get_opcode() != Opcode.NOTIFY:
logger.warn(NOTIFY_OUT_REPLY_BAD_OPCODE, from_addr[0],
from_addr[1], msg.get_opcode().to_text())
return _BAD_OPCODE
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index b9183e0..ad1107f 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -377,7 +377,7 @@ class TestNotifyOut(unittest.TestCase):
def test_get_notify_slaves_from_ns(self):
records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
- RRClass.IN())
+ RRClass.IN)
self.assertEqual(6, len(records))
self.assertEqual('8:8::8:8', records[5])
self.assertEqual('7.7.7.7', records[4])
@@ -387,7 +387,7 @@ class TestNotifyOut(unittest.TestCase):
self.assertEqual('3.3.3.3', records[0])
records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
- RRClass.IN())
+ RRClass.IN)
self.assertEqual(3, len(records))
self.assertEqual('5:5::5:5', records[2])
self.assertEqual('4:4::4:4', records[1])
@@ -396,19 +396,19 @@ class TestNotifyOut(unittest.TestCase):
def test_get_notify_slaves_from_ns_unusual(self):
self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nons.example'), RRClass.IN()))
+ Name('nons.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nosoa.example'), RRClass.IN()))
+ Name('nosoa.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('multisoa.example'), RRClass.IN()))
+ Name('multisoa.example'), RRClass.IN))
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('nosuchzone.example'), RRClass.IN()))
+ Name('nosuchzone.example'), RRClass.IN))
# This will cause failure in getting access to the data source.
self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
self.assertEqual([], self._notify._get_notify_slaves_from_ns(
- Name('example.com'), RRClass.IN()))
+ Name('example.com'), RRClass.IN))
def test_init_notify_out(self):
self._notify._init_notify_out(self._db_file)
@@ -436,7 +436,9 @@ class TestNotifyOut(unittest.TestCase):
self._notify._notify_infos[('example.com.', 'IN')].notify_timeout = time.time() + 5
timeout, valid_fds, notifying_zones = self._notify._prepare_select_info()
self.assertEqual(timeout, 0)
- self.assertListEqual([2, 1], valid_fds)
+ self.assertEqual(len(valid_fds), 2)
+ self.assertIn(1, valid_fds)
+ self.assertIn(2, valid_fds)
def test_shutdown(self):
thread = self._notify.dispatcher()
diff --git a/src/lib/python/isc/server_common/dns_tcp.py b/src/lib/python/isc/server_common/dns_tcp.py
index 3b78d0d..9ce94fe 100644
--- a/src/lib/python/isc/server_common/dns_tcp.py
+++ b/src/lib/python/isc/server_common/dns_tcp.py
@@ -248,7 +248,7 @@ class DNSTCPContext:
ClientFormatter(self.__remote_addr),
self.__send_marker, total_len)
return self.SENDING
- logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_ERROR,
+ logger.warn(PYSERVER_COMMON_DNS_TCP_SEND_FAILED,
ClientFormatter(self.__remote_addr),
self.__send_marker, total_len, ex)
self.__sock.close()
diff --git a/src/lib/python/isc/server_common/server_common_messages.mes b/src/lib/python/isc/server_common/server_common_messages.mes
index bd4e3cc..f22ce65 100644
--- a/src/lib/python/isc/server_common/server_common_messages.mes
+++ b/src/lib/python/isc/server_common/server_common_messages.mes
@@ -27,7 +27,7 @@ transmitted over a TCP connection, possibly after multiple send
operations. The destination address and the total size of the message
(including the 2-byte length field) are shown in the log message.
-% PYSERVER_COMMON_DNS_TCP_SEND_ERROR failed to send TCP message to %1 (%2/%3 bytes sent): %4
+% PYSERVER_COMMON_DNS_TCP_SEND_FAILED failed to send TCP message to %1 (%2/%3 bytes sent): %4
An attempt was made to send a DNS message over a TCP connection,
but it failed due to a network error. Although it's not expected
to happen too often, it can still happen for various reasons. The
diff --git a/src/lib/python/isc/statistics/tests/counters_test.py b/src/lib/python/isc/statistics/tests/counters_test.py
index ff15efc..2d791c4 100644
--- a/src/lib/python/isc/statistics/tests/counters_test.py
+++ b/src/lib/python/isc/statistics/tests/counters_test.py
@@ -120,7 +120,7 @@ class TestBasicMethods(unittest.TestCase):
'item_type': 'real',
'item_default': 0.0 }]
counters._stop_timer(t2, elem, spec, 'time')
- self.assertGreater(counters._get_counter(elem,'time'), 1)
+ self.assertGreaterEqual(counters._get_counter(elem,'time'), 1.0)
def test_rasing_incrementers(self):
""" use Thread"""
@@ -139,9 +139,9 @@ class TestBasicMethods(unittest.TestCase):
counters._get_counter(self.counters._statistics._data,
counter_name),
concurrency * number)
- self.assertGreater(
+ self.assertGreaterEqual(
counters._get_counter(self.counters._statistics._data,
- timer_name), 0)
+ timer_name), 0.0)
def test_concat(self):
# only strings
@@ -200,7 +200,7 @@ class BaseTestCounters():
if name.find('time_to_') == 0:
self.counters.start_timer(*args)
self.counters.stop_timer(*args)
- self.assertGreater(self.counters.get(*args), 0)
+ self.assertGreaterEqual(self.counters.get(*args), 0.0)
sec = self.counters.get(*args)
for zone_str in (self._entire_server, TEST_ZONE_NAME_STR):
isc.cc.data.set(self._statistics_data,
diff --git a/src/lib/python/isc/sysinfo/sysinfo.py b/src/lib/python/isc/sysinfo/sysinfo.py
index 8e4610c..099ac89 100644
--- a/src/lib/python/isc/sysinfo/sysinfo.py
+++ b/src/lib/python/isc/sysinfo/sysinfo.py
@@ -44,7 +44,7 @@ class SysInfo:
self._net_stats = 'Unknown\n'
self._net_connections = 'Unknown\n'
- # The following are Linux speicific, and should eventually be removed
+ # The following are Linux specific, and should eventually be removed
# from this level; for now we simply default to None (so they won't
# be printed)
self._platform_distro = None
@@ -162,9 +162,12 @@ class SysInfoPOSIX(SysInfo):
u = os.uname()
self._platform_name = u[0]
+ self._hostname = u[1]
self._platform_version = u[2]
self._platform_machine = u[4]
+ self._loadavg = os.getloadavg()
+
class SysInfoLinux(SysInfoPOSIX):
"""Linux implementation of the SysInfo class.
See the SysInfo class documentation for more information.
@@ -322,8 +325,8 @@ class SysInfoBSD(SysInfoPOSIX):
except (subprocess.CalledProcessError, OSError):
self._net_connections = 'Warning: "netstat -nr" command failed.\n'
-class SysInfoOpenBSD(SysInfoBSD):
- """OpenBSD implementation of the SysInfo class.
+class SysInfoNetBSD(SysInfoBSD):
+ """NetBSD and OpenBSD implementation of the SysInfo class.
See the SysInfo class documentation for more information.
"""
def __init__(self):
@@ -499,8 +502,8 @@ def SysInfoFromFactory():
osname = platform.system()
if osname == 'Linux':
return SysInfoLinux()
- elif osname == 'OpenBSD':
- return SysInfoOpenBSD()
+ elif (osname == 'NetBSD') or (osname == 'OpenBSD'):
+ return SysInfoNetBSD()
elif osname == 'FreeBSD':
return SysInfoFreeBSD()
elif osname == 'Darwin':
@@ -508,4 +511,4 @@ def SysInfoFromFactory():
elif osname == 'BIND10Testcase':
return SysInfoTestcase()
else:
- return SysInfo()
+ return SysInfoPOSIX()
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
index 7eac772..eb3da28 100644
--- a/src/lib/python/isc/testutils/rrset_utils.py
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -30,7 +30,7 @@ def rrsets_equal(a, b):
a.get_class() == b.get_class() and \
a.get_type() == b.get_type() and \
a.get_ttl() == b.get_ttl() and \
- (a.get_type() == RRType.RRSIG() or
+ (a.get_type() == RRType.RRSIG or
sorted(a.get_rdata()) == sorted(b.get_rdata()))
# The following are short cut utilities to create an RRset of a specific
@@ -38,25 +38,25 @@ def rrsets_equal(a, b):
# tests, so we define default values for them for convenience.
def create_a(name, address, ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+ rrset = RRset(name, RRClass.IN, RRType.A, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.A, RRClass.IN, address))
return rrset
def create_aaaa(name, address, ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+ rrset = RRset(name, RRClass.IN, RRType.AAAA, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.AAAA, RRClass.IN, address))
return rrset
def create_ns(nsname, name=Name('example.com'), ttl=3600):
'''For convenience we use a default name often used as a zone name'''
- rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+ rrset = RRset(name, RRClass.IN, RRType.NS, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.NS, RRClass.IN, nsname))
return rrset
-def create_cname(target='target.example.com', name=Name('example.com'),
+def create_cname(target='target.example.com.', name=Name('example.com'),
ttl=3600):
- rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
- rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+ rrset = RRset(name, RRClass.IN, RRType.CNAME, RRTTL(ttl))
+ rrset.add_rdata(Rdata(RRType.CNAME, RRClass.IN, target))
return rrset
def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
@@ -67,16 +67,16 @@ def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
The RDATA will be filled with specified length of all-0 data.
'''
- rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
- rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+ rrset = RRset(name, RRClass.IN, type, RRTTL(ttl))
+ rrset.add_rdata(Rdata(type, RRClass.IN, '\\# ' +
str(rdlen) + ' ' + '00' * rdlen))
return rrset
def create_soa(serial, name=Name('example.com'), ttl=3600):
'''For convenience we use a default name often used as a zone name'''
- rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+ rrset = RRset(name, RRClass.IN, RRType.SOA, RRTTL(ttl))
rdata_str = 'master.example.com. admin.example.com. ' + \
str(serial) + ' 3600 1800 2419200 7200'
- rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+ rrset.add_rdata(Rdata(RRType.SOA, RRClass.IN, rdata_str))
return rrset
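
The change running through these test diffs is the isc.dns Python API
moving from factory calls (RRType.A(), RRClass.IN(), Rcode.NOERROR()) to
plain class-level constants. A minimal sketch of the new calling style,
assuming the isc.dns bindings from this branch:

    from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata

    # Constants such as RRType.A and RRClass.IN are now class attributes,
    # so they are referenced without the trailing parentheses.
    rrset = RRset(Name('www.example.org.'), RRClass.IN, RRType.A, RRTTL(3600))
    rrset.add_rdata(Rdata(RRType.A, RRClass.IN, '192.0.2.1'))
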
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index ea51967..4e06eea 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -146,12 +146,12 @@ class Diff:
"""
# first add or delete must be of type SOA
if len(buf) == 0 and\
- rr.get_type() != isc.dns.RRType.SOA():
+ rr.get_type() != isc.dns.RRType.SOA:
raise ValueError("First " + operation +
" in single update mode must be of type SOA")
# And later adds or deletes may not
elif len(buf) != 0 and\
- rr.get_type() == isc.dns.RRType.SOA():
+ rr.get_type() == isc.dns.RRType.SOA:
raise ValueError("Multiple SOA records in single " +
"update mode " + operation)
buf.append((operation, rr))
@@ -238,8 +238,8 @@ class Diff:
'''A helper routine to identify whether two RRsets are of the
same 'type'. For RRSIGs we should consider type covered, too.
'''
- if rrset1.get_type() != isc.dns.RRType.RRSIG() or \
- rrset2.get_type != isc.dns.RRType.RRSIG():
+ if rrset1.get_type() != isc.dns.RRType.RRSIG or \
+ rrset2.get_type() != isc.dns.RRType.RRSIG:
return rrset1.get_type() == rrset2.get_type()
# RR type of the both RRsets is RRSIG. Compare type covered.
# We know they have exactly one RDATA.
@@ -425,7 +425,7 @@ class Diff:
return a.get_name() == b.get_name() and\
a.get_type() == b.get_type() and\
a.get_rdata()[0] == b.get_rdata()[0]
- if rr.get_type() == isc.dns.RRType.SOA():
+ if rr.get_type() == isc.dns.RRType.SOA:
return buf
else:
return [ op for op in buf if not same_rr(op[1], rr)]
@@ -584,3 +584,16 @@ class Diff:
if rr.get_name() == name:
new_rrsets.append(rr)
return result, new_rrsets, flags
+
+ def get_rrset_collection(self):
+ '''
+ This first applies all changes to the data source. Then it creates
+ and returns an RRsetCollection on top of the corresponding zone
+ updater. Note that it may not be possible to apply further changes
+ after that.
+
+ This must not be called after a commit, or it will raise ValueError.
+ '''
+ # apply() itself will check that the diff is not yet committed.
+ self.apply()
+ return self.__updater.get_rrset_collection()
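
A minimal usage sketch of the new Diff.get_rrset_collection() helper,
mirroring the test added below; 'updater_client' and 'soa_rrset' are
assumed placeholders for an isc.datasrc client and an SOA RRset:

    from isc.dns import Name, RRClass, RRType
    from isc.xfrin.diff import Diff

    diff = Diff(updater_client, Name('example.org.'), single_update_mode=True)
    diff.add_data(soa_rrset)                  # first change must be the SOA
    collection = diff.get_rrset_collection()  # applies pending changes first
    found = collection.find(Name('example.org.'), RRClass.IN, RRType.SOA)
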
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 906406f..bb83340 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -16,7 +16,8 @@
import isc.log
import unittest
from isc.datasrc import ZoneFinder
-from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata, \
+ RRsetCollectionBase
from isc.xfrin.diff import Diff, NoSuchZone
class TestError(Exception):
@@ -56,8 +57,8 @@ class DiffTest(unittest.TestCase):
self.__find_all_name = None
self.__find_all_options = None
# Some common values
- self.__rrclass = RRClass.IN()
- self.__type = RRType.A()
+ self.__rrclass = RRClass.IN
+ self.__type = RRType.A
self.__ttl = RRTTL(3600)
# And RRsets
# Create two valid rrsets
@@ -80,27 +81,27 @@ class DiffTest(unittest.TestCase):
# Also create a few other (valid) rrsets
# A SOA record
self.__rrset_soa = RRset(Name('example.org.'), self.__rrclass,
- RRType.SOA(), RRTTL(3600))
- self.__rrset_soa.add_rdata(Rdata(RRType.SOA(), self.__rrclass,
+ RRType.SOA, RRTTL(3600))
+ self.__rrset_soa.add_rdata(Rdata(RRType.SOA, self.__rrclass,
"ns1.example.org. " +
"admin.example.org. " +
"1233 3600 1800 2419200 7200"))
# A few single-rr rrsets that together would form a multi-rr rrset
self.__rrset3 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset3.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "one"))
+ RRType.TXT, self.__ttl)
+ self.__rrset3.add_rdata(Rdata(RRType.TXT, self.__rrclass, "one"))
self.__rrset4 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset4.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "two"))
+ RRType.TXT, self.__ttl)
+ self.__rrset4.add_rdata(Rdata(RRType.TXT, self.__rrclass, "two"))
self.__rrset5 = RRset(Name('c.example.org.'), self.__rrclass,
- RRType.TXT(), self.__ttl)
- self.__rrset5.add_rdata(Rdata(RRType.TXT(), self.__rrclass, "three"))
+ RRType.TXT, self.__ttl)
+ self.__rrset5.add_rdata(Rdata(RRType.TXT, self.__rrclass, "three"))
self.__rrset6 = RRset(Name('d.example.org.'), self.__rrclass,
- RRType.A(), self.__ttl)
- self.__rrset6.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.1"))
+ RRType.A, self.__ttl)
+ self.__rrset6.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.1"))
self.__rrset7 = RRset(Name('d.example.org.'), self.__rrclass,
- RRType.A(), self.__ttl)
- self.__rrset7.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ RRType.A, self.__ttl)
+ self.__rrset7.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.2"))
def __mock_compact(self):
"""
@@ -315,7 +316,7 @@ class DiffTest(unittest.TestCase):
self.assertRaises(ValueError, diff.add_data, self.__rrset2)
self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
self.assertRaises(ValueError, diff.find, Name('foo.example.org.'),
- RRType.A())
+ RRType.A)
self.assertRaises(ValueError, diff.find_all, Name('foo.example.org.'))
diff.apply = orig_apply
self.assertRaises(ValueError, diff.apply)
@@ -434,9 +435,9 @@ class DiffTest(unittest.TestCase):
Test a wrong class of rrset is rejected.
"""
diff = Diff(self, Name('example.org.'))
- rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+ rrset = RRset(Name('a.example.org.'), RRClass.CH, RRType.NS,
self.__ttl)
- rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+ rrset.add_rdata(Rdata(RRType.NS, RRClass.CH, 'ns.example.org.'))
self.assertRaises(ValueError, diff.add_data, rrset)
self.assertRaises(ValueError, diff.delete_data, rrset)
@@ -516,14 +517,14 @@ class DiffTest(unittest.TestCase):
'''
diff = Diff(self, Name('example.org.'))
rrsig1 = RRset(Name('example.org'), self.__rrclass,
- RRType.RRSIG(), RRTTL(3600))
- rrsig1.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ RRType.RRSIG, RRTTL(3600))
+ rrsig1.add_rdata(Rdata(RRType.RRSIG, self.__rrclass,
'A 5 3 3600 20000101000000 20000201000000 ' +
'0 example.org. FAKEFAKEFAKE'))
diff.add_data(rrsig1)
rrsig2 = RRset(Name('example.org'), self.__rrclass,
- RRType.RRSIG(), RRTTL(1800))
- rrsig2.add_rdata(Rdata(RRType.RRSIG(), self.__rrclass,
+ RRType.RRSIG, RRTTL(1800))
+ rrsig2.add_rdata(Rdata(RRType.RRSIG, self.__rrclass,
'AAAA 5 3 3600 20000101000000 20000201000000 ' +
'1 example.org. FAKEFAKEFAKE'))
diff.add_data(rrsig2)
@@ -557,7 +558,7 @@ class DiffTest(unittest.TestCase):
'''
diff_multi = Diff(self, Name('example.org.'), single_update_mode=False)
self.assertRaises(ValueError, diff_multi.find_updated,
- Name('example.org.'), RRType.A())
+ Name('example.org.'), RRType.A)
self.assertRaises(ValueError, diff_multi.find_all_updated,
Name('example.org.'))
@@ -570,12 +571,12 @@ class DiffTest(unittest.TestCase):
'''
# full rrset for A (to check compact())
- txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT(),
+ txt = RRset(Name('c.example.org.'), self.__rrclass, RRType.TXT,
RRTTL(3600))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "one"))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "two"))
txt.add_rdata(Rdata(txt.get_type(), txt.get_class(), "three"))
- a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A(),
+ a = RRset(Name('d.example.org.'), self.__rrclass, RRType.A,
RRTTL(3600))
a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.1"))
a.add_rdata(Rdata(a.get_type(), a.get_class(), "192.0.2.2"))
@@ -679,7 +680,7 @@ class DiffTest(unittest.TestCase):
def test_find(self):
diff = Diff(self, Name('example.org.'))
name = Name('www.example.org.')
- rrtype = RRType.A()
+ rrtype = RRType.A
self.assertFalse(self.__find_called)
self.assertEqual(None, self.__find_name)
@@ -697,7 +698,7 @@ class DiffTest(unittest.TestCase):
def test_find_options(self):
diff = Diff(self, Name('example.org.'))
name = Name('foo.example.org.')
- rrtype = RRType.TXT()
+ rrtype = RRType.TXT
options = ZoneFinder.NO_WILDCARD
self.assertEqual("find_return", diff.find(name, rrtype, options))
@@ -997,8 +998,8 @@ class DiffTest(unittest.TestCase):
# Add a second rr with different type at same name
add_rrset = RRset(self.__rrset3.get_name(), self.__rrclass,
- RRType.A(), self.__ttl)
- add_rrset.add_rdata(Rdata(RRType.A(), self.__rrclass, "192.0.2.2"))
+ RRType.A, self.__ttl)
+ add_rrset.add_rdata(Rdata(RRType.A, self.__rrclass, "192.0.2.2"))
diff.add_data(add_rrset)
self.__check_find_all_call(diff.find_all_updated, self.__rrset3,
@@ -1087,6 +1088,52 @@ class DiffTest(unittest.TestCase):
self.__check_find_all_call(diff.find_all, self.__rrset3,
rcode)
+ class Collection(isc.dns.RRsetCollectionBase):
+ '''
+ Our own mock RRsetCollection. We only pass it through, but we
+ still define a (mostly empty) find method to satisfy the
+ expectations.
+ '''
+ def __init__(self):
+ '''
+ Empty init. The base class's __init__ can't be called,
+ so we need to provide our own to shadow it -- and make sure
+ not to call super().__init__().
+ '''
+ pass
+
+ def find(self, name, rrclass, rrtype):
+ '''
+ Empty find method. Returns None for each query (pretending
+ the collection is empty). Present mostly for completeness.
+ '''
+ return None
+
+ def get_rrset_collection(self):
+ '''
+ Part of pretending to be the zone updater. This returns the rrset
+ collection (a mock one, unusable) for the updater.
+ '''
+ return self.Collection()
+
+ def test_get_rrset_collection(self):
+ '''
+ Test that the diff can return the corresponding rrset collection,
+ and that it applies the data first.
+ '''
+ diff = Diff(self, Name('example.org'), single_update_mode=True)
+ diff.add_data(self.__rrset_soa)
+ collection = diff.get_rrset_collection()
+ # Check it is applied
+ self.assertEqual(1, len(self.__data_operations))
+ self.assertEqual('add', self.__data_operations[0][0])
+ # Check the returned one is actually RRsetCollection
+ self.assertTrue(isinstance(collection, self.Collection))
+ # The collection is just the mock from above, so this doesn't do much
+ # testing, but we check that the mock got through and didn't get hurt.
+ self.assertIsNone(collection.find(Name('example.org'), RRClass.IN,
+ RRType.SOA))
+
if __name__ == "__main__":
isc.log.init("bind10")
isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 7eae6fe..8d4ae58 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -609,7 +609,7 @@ SERVFAIL:
if (category == ResponseClassifier::RCODE) {
// Special case as this message takes two arguments.
- LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERROR).
+ LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_RETURNED).
arg(questionText(question_)).arg(rcode);
} else {
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index 6447082..c89dedb 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -133,7 +133,7 @@ A debug message indicating that a protocol error was received and that
the resolver is repeating the query to the same nameserver. After this
repeated query, there will be the indicated number of retries left.
-% RESLIB_RCODE_ERROR response to query for <%1> returns RCODE of %2
+% RESLIB_RCODE_RETURNED response to query for <%1> returns RCODE of %2
A debug message, the response to the specified query indicated an error
that is not covered by a specific code path. A SERVFAIL will be returned.
diff --git a/src/lib/resolve/tests/response_classifier_unittest.cc b/src/lib/resolve/tests/response_classifier_unittest.cc
index 23c8666..30aeabb 100644
--- a/src/lib/resolve/tests/response_classifier_unittest.cc
+++ b/src/lib/resolve/tests/response_classifier_unittest.cc
@@ -131,9 +131,9 @@ public:
// ... the CNAME records
rrs_in_cname_www1->addRdata(ConstRdataPtr(
- new CNAME("www.example.com")));
+ new CNAME("www.example.com.")));
rrs_in_cname_www2->addRdata(ConstRdataPtr(
- new CNAME("www1.example.com")));
+ new CNAME("www1.example.com.")));
}
Message msg_a; // Pointer to message in RENDER state
diff --git a/src/lib/server_common/portconfig.cc b/src/lib/server_common/portconfig.cc
index 530c919..b214ef5 100644
--- a/src/lib/server_common/portconfig.cc
+++ b/src/lib/server_common/portconfig.cc
@@ -152,7 +152,7 @@ installListenAddresses(const AddressList& new_addresses,
throw;
} catch (const exception& e) {
// Any other kind of exception is fatal. It might mean we are in
- // inconsistent state with the boss/socket creator, so we abort
+ // inconsistent state with the b10-init/socket creator, so we abort
// to make sure it doesn't last.
LOG_FATAL(logger, SRVCOMM_EXCEPTION_ALLOC).arg(e.what());
abort();
diff --git a/src/lib/server_common/portconfig.h b/src/lib/server_common/portconfig.h
index 0795728..7213e09 100644
--- a/src/lib/server_common/portconfig.h
+++ b/src/lib/server_common/portconfig.h
@@ -92,8 +92,9 @@ parseAddresses(isc::data::ConstElementPtr addresses,
/// but removes all the sockets it listened on. One of the exceptions is
/// propagated.
///
-/// The ports are requested from the socket creator through boss. Therefore
-/// you need to initialize the SocketRequestor before using this function.
+/// The ports are requested from the socket creator through b10-init.
+/// Therefore you need to initialize the SocketRequestor before using this
+/// function.
///
/// \param new_addresses are the addresses you want to listen on.
/// \param address_store is the place you store your current addresses. It is
@@ -107,7 +108,7 @@ parseAddresses(isc::data::ConstElementPtr addresses,
///
/// \throw asiolink::IOError when initialization or closing of socket fails.
/// \throw isc::server_common::SocketRequestor::Socket error when the
-/// boss/socket creator doesn't want to give us the socket.
+/// b10-init/socket creator doesn't want to give us the socket.
/// \throw std::bad_alloc when allocation fails.
/// \throw isc::InvalidOperation when the function is called and the
/// SocketRequestor isn't initialized yet.
diff --git a/src/lib/server_common/socket_request.cc b/src/lib/server_common/socket_request.cc
index e471ad0..981930d 100644
--- a/src/lib/server_common/socket_request.cc
+++ b/src/lib/server_common/socket_request.cc
@@ -34,21 +34,21 @@ namespace server_common {
namespace {
SocketRequestor* requestor(NULL);
-// Before the boss process calls send_fd, it first sends this
+// Before the b10-init process calls send_fd, it first sends this
// string to indicate success, followed by the file descriptor
const std::string& CREATOR_SOCKET_OK() {
static const std::string str("1\n");
return (str);
}
-// Before the boss process calls send_fd, it sends this
+// Before the b10-init process calls send_fd, it sends this
// string to indicate failure. It will not send a file descriptor.
const std::string& CREATOR_SOCKET_UNAVAILABLE() {
static const std::string str("0\n");
return (str);
}
-// The name of the ccsession command to request a socket from boss
+// The name of the ccsession command to request a socket from b10-init
// (the actual format of command and response are hardcoded in their
// respective methods)
const std::string& REQUEST_SOCKET_COMMAND() {
@@ -56,7 +56,7 @@ const std::string& REQUEST_SOCKET_COMMAND() {
return (str);
}
-// The name of the ccsession command to tell boss we no longer need
+// The name of the ccsession command to tell b10-init we no longer need
// a socket (the actual format of command and response are hardcoded
// in their respective methods)
const std::string& RELEASE_SOCKET_COMMAND() {
@@ -69,7 +69,7 @@ const size_t SOCKET_ERROR_CODE = 2;
const size_t SHARE_ERROR_CODE = 3;
// A helper converter from numeric protocol ID to the corresponding string.
-// used both for generating a message for the boss process and for logging.
+// used both for generating a message for the b10-init process and for logging.
inline const char*
protocolString(SocketRequestor::Protocol protocol) {
switch (protocol) {
@@ -84,7 +84,7 @@ protocolString(SocketRequestor::Protocol protocol) {
// Creates the cc session message to request a socket.
// The actual command format is hardcoded, and should match
-// the format as read in bind10_src.py.in
+// the format as read in b10-init.py.in
isc::data::ConstElementPtr
createRequestSocketMessage(SocketRequestor::Protocol protocol,
const std::string& address, uint16_t port,
@@ -125,7 +125,7 @@ createReleaseSocketMessage(const std::string& token) {
return (isc::config::createCommand(RELEASE_SOCKET_COMMAND(), release));
}
-// Checks and parses the response receive from Boss
+// Checks and parses the response received from Init
// If successful, token and path will be set to the values found in the
// answer.
// If the response was an error response, or does not contain the
@@ -158,7 +158,7 @@ readRequestSocketAnswer(isc::data::ConstElementPtr recv_msg,
path = answer->get("path")->stringValue();
}
-// Connect to the domain socket that has been received from Boss.
+// Connect to the domain socket that has been received from Init.
// (i.e. the one that is used to pass created sockets over).
//
// This should only be called if the socket had not been connected to
@@ -211,14 +211,14 @@ createFdShareSocket(const std::string& path) {
// \return the socket fd that has been read
int
getSocketFd(const std::string& token, int sock_pass_fd) {
- // Tell the boss the socket token.
+ // Tell b10-init the socket token.
const std::string token_data = token + "\n";
if (!isc::util::io::write_data(sock_pass_fd, token_data.c_str(),
token_data.size())) {
isc_throw(SocketRequestor::SocketError, "Error writing socket token");
}
- // Boss first sends some data to signal that getting the socket
+ // Init first sends some data to signal that getting the socket
// from its cache succeeded
char status[3]; // We need a space for trailing \0, hence 3
memset(status, 0, 3);
@@ -226,7 +226,7 @@ getSocketFd(const std::string& token, int sock_pass_fd) {
isc_throw(SocketRequestor::SocketError,
"Error reading status code while requesting socket");
}
- // Actual status value hardcoded by boss atm.
+ // Actual status value hardcoded by b10-init at the moment.
if (CREATOR_SOCKET_UNAVAILABLE() == status) {
isc_throw(SocketRequestor::SocketError,
"CREATOR_SOCKET_UNAVAILABLE returned");
@@ -258,7 +258,7 @@ getSocketFd(const std::string& token, int sock_pass_fd) {
}
// This implementation class for SocketRequestor uses
-// a CC session for communication with the boss process,
+// a CC session for communication with the b10-init process,
// and fd_share to read out the socket(s).
// Since we only use a reference to the session, it must never
// be closed during the lifetime of this class
@@ -300,10 +300,10 @@ public:
share_name.empty() ? app_name_ :
share_name);
- // Send it to boss
- const int seq = session_.group_sendmsg(request_msg, "Boss");
+ // Send it to b10-init
+ const int seq = session_.group_sendmsg(request_msg, "Init");
- // Get the answer from the boss.
+ // Get the answer from b10-init.
// Just do a blocking read, we can't really do much anyway
isc::data::ConstElementPtr env, recv_msg;
if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
@@ -330,12 +330,12 @@ public:
const isc::data::ConstElementPtr release_msg =
createReleaseSocketMessage(token);
- // Send it to boss
- const int seq = session_.group_sendmsg(release_msg, "Boss");
+ // Send it to b10-init
+ const int seq = session_.group_sendmsg(release_msg, "Init");
LOG_DEBUG(logger, DBGLVL_TRACE_DETAIL, SOCKETREQUESTOR_RELEASESOCKET).
arg(token);
- // Get the answer from the boss.
+ // Get the answer from b10-init.
// Just do a blocking read, we can't really do much anyway
isc::data::ConstElementPtr env, recv_msg;
if (!session_.group_recvmsg(env, recv_msg, false, seq)) {
diff --git a/src/lib/server_common/tests/portconfig_unittest.cc b/src/lib/server_common/tests/portconfig_unittest.cc
index 0c971ee..48d69ba 100644
--- a/src/lib/server_common/tests/portconfig_unittest.cc
+++ b/src/lib/server_common/tests/portconfig_unittest.cc
@@ -330,8 +330,8 @@ TEST_F(InstallListenAddressesDeathTest, inconsistent) {
}
}
-// If we are unable to tell the boss we closed a socket, we abort, as we are
-// not consistent with the boss most probably.
+// If we are unable to tell the b10-init we closed a socket, we abort, as we
+// are not consistent with b10-init most probably.
TEST_F(InstallListenAddressesDeathTest, cantClose) {
if (!isc::util::unittests::runningOnValgrind()) {
installListenAddresses(valid_, store_, dnss_);
diff --git a/src/lib/server_common/tests/socket_requestor_test.cc b/src/lib/server_common/tests/socket_requestor_test.cc
index ac1731f..9085ba9 100644
--- a/src/lib/server_common/tests/socket_requestor_test.cc
+++ b/src/lib/server_common/tests/socket_requestor_test.cc
@@ -76,7 +76,7 @@ TEST(SocketRequestorAccess, initialized) {
initTestSocketRequestor(NULL);
}
-// This class contains a fake (module)ccsession to emulate answers from Boss
+// This class contains a fake (module)ccsession to emulate answers from Init
class SocketRequestorTest : public ::testing::Test {
public:
SocketRequestorTest() : session(ElementPtr(new ListElement),
@@ -100,7 +100,7 @@ public:
}
// Creates a valid socket request answer, as it would be sent by
- // Boss. 'valid' in terms of format, not values
+ // Init. 'valid' in terms of format, not values
void
addAnswer(const std::string& token, const std::string& path) {
ElementPtr answer_part = Element::createMap();
@@ -141,7 +141,7 @@ createExpectedRequest(const std::string& address,
// create the envelope
const ElementPtr packet = Element::createList();
- packet->add(Element::create("Boss"));
+ packet->add(Element::create("Init"));
packet->add(Element::create("*"));
packet->add(createCommand("get_socket", command_args));
packet->add(Element::create(-1));
@@ -282,7 +282,7 @@ createExpectedRelease(const std::string& token) {
// create the envelope
const ElementPtr packet = Element::createList();
- packet->add(Element::create("Boss"));
+ packet->add(Element::create("Init"));
packet->add(Element::create("*"));
packet->add(createCommand("drop_socket", command_args));
packet->add(Element::create(-1));
diff --git a/src/lib/util/unittests/fork.cc b/src/lib/util/unittests/fork.cc
index 3414a3c..7ed22f8 100644
--- a/src/lib/util/unittests/fork.cc
+++ b/src/lib/util/unittests/fork.cc
@@ -93,10 +93,10 @@ provide_input(int *read_pipe, const void *input, const size_t length)
/*
* This creates a pipe, forks and reads the pipe and compares it
- * with given data. Used to check output of run in asynchronous way.
+ * with given data. Used to check output of run in an asynchronous way.
*/
pid_t
-check_output(int *write_pipe, const void *output, const size_t length)
+check_output(int *write_pipe, const void* const output, const size_t length)
{
int pipes[2];
if (pipe(pipes)) {
@@ -109,9 +109,7 @@ check_output(int *write_pipe, const void *output, const size_t length)
return pid;
} else {
close(pipes[1]);
- // We don't return the memory, but we're in tests and end this process
- // right away.
- unsigned char *buffer = new unsigned char[length + 1];
+ unsigned char* buffer = new unsigned char[length + 1];
// Try to read one byte more to see if the output ends here
size_t got_length(read_data(pipes[0], buffer, length + 1));
bool ok(true);
@@ -133,8 +131,10 @@ check_output(int *write_pipe, const void *output, const size_t length)
fprintf(stderr, "%02hhx", output_c[i]);
}
fprintf(stderr, "\n");
+ delete [] buffer;
exit(1);
} else {
+ delete [] buffer;
exit(0);
}
}
diff --git a/src/lib/util/unittests/fork.h b/src/lib/util/unittests/fork.h
index d5623a7..6b9e749 100644
--- a/src/lib/util/unittests/fork.h
+++ b/src/lib/util/unittests/fork.h
@@ -40,10 +40,10 @@ bool
process_ok(pid_t process);
pid_t
-provide_input(int *read_pipe, const void *input, const size_t length);
+provide_input(int* read_pipe, const void* input, const size_t length);
pid_t
-check_output(int *write_pipe, const void *output, const size_t length);
+check_output(int* write_pipe, const void* const output, const size_t length);
} // End of the namespace
}
diff --git a/tests/lettuce/configurations/auth/auth_badzone.config.orig b/tests/lettuce/configurations/auth/auth_badzone.config.orig
index ab11bc9..f86882a 100644
--- a/tests/lettuce/configurations/auth/auth_badzone.config.orig
+++ b/tests/lettuce/configurations/auth/auth_badzone.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [{
"severity": "DEBUG",
@@ -29,7 +29,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/auth/auth_basic.config.orig b/tests/lettuce/configurations/auth/auth_basic.config.orig
index 4067fb1..24f615c 100644
--- a/tests/lettuce/configurations/auth/auth_basic.config.orig
+++ b/tests/lettuce/configurations/auth/auth_basic.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -13,7 +13,7 @@
"address": "127.0.0.1"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/bindctl/bindctl.config.orig b/tests/lettuce/configurations/bindctl/bindctl.config.orig
index 3530b3e..ef0e8e2 100644
--- a/tests/lettuce/configurations/bindctl/bindctl.config.orig
+++ b/tests/lettuce/configurations/bindctl/bindctl.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -17,7 +17,7 @@
"data_sources": {
"classes": {}
},
- "Boss": {
+ "Init": {
"components": {
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
}
diff --git a/tests/lettuce/configurations/bindctl_commands.config.orig b/tests/lettuce/configurations/bindctl_commands.config.orig
index b60201d..980262b 100644
--- a/tests/lettuce/configurations/bindctl_commands.config.orig
+++ b/tests/lettuce/configurations/bindctl_commands.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -23,7 +23,7 @@
"address": "127.0.0.1"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "dispensable", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/ddns/ddns.config.orig b/tests/lettuce/configurations/ddns/ddns.config.orig
index 93e7c1c..02978be 100644
--- a/tests/lettuce/configurations/ddns/ddns.config.orig
+++ b/tests/lettuce/configurations/ddns/ddns.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -39,7 +39,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-xfrout": {
"kind": "dispensable",
diff --git a/tests/lettuce/configurations/ddns/noddns.config.orig b/tests/lettuce/configurations/ddns/noddns.config.orig
index 7a9a947..d075924 100644
--- a/tests/lettuce/configurations/ddns/noddns.config.orig
+++ b/tests/lettuce/configurations/ddns/noddns.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -35,7 +35,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-xfrout": {"kind": "dispensable"},
"b10-auth": {"kind": "needed", "special": "auth"},
diff --git a/tests/lettuce/configurations/default.config b/tests/lettuce/configurations/default.config
index 9e1d3d1..2713def 100644
--- a/tests/lettuce/configurations/default.config
+++ b/tests/lettuce/configurations/default.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
index c5545ed..7da6304 100644
--- a/tests/lettuce/configurations/example.org.config.orig
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -26,7 +26,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/example.org.inmem.config b/tests/lettuce/configurations/example.org.inmem.config
index 7ea34b3..7ec921d 100644
--- a/tests/lettuce/configurations/example.org.inmem.config
+++ b/tests/lettuce/configurations/example.org.inmem.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [{
"severity": "DEBUG",
@@ -26,7 +26,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
index eeb9733..3bb3330 100644
--- a/tests/lettuce/configurations/example2.org.config
+++ b/tests/lettuce/configurations/example2.org.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"severity": "DEBUG",
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
index 107c53f..d93a8c6 100644
--- a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
+++ b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -29,7 +29,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/ixfr-out/testset1-config.db b/tests/lettuce/configurations/ixfr-out/testset1-config.db
index e78c84a..d5eaf83 100644
--- a/tests/lettuce/configurations/ixfr-out/testset1-config.db
+++ b/tests/lettuce/configurations/ixfr-out/testset1-config.db
@@ -9,7 +9,7 @@
}
]
},
- "version": 2,
+ "version": 3,
"Logging": {
"loggers":
[
@@ -51,7 +51,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
index fe482f9..96e25d8 100644
--- a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
+++ b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -24,7 +24,7 @@
}]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth-2": {"kind": "dispensable", "special": "auth"},
"b10-auth": {"kind": "dispensable", "special": "auth"},
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
index bc4ff5f..9e6c168 100644
--- a/tests/lettuce/configurations/no_db_file.config
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"severity": "DEBUG",
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
diff --git a/tests/lettuce/configurations/nsec3/nsec3_auth.config b/tests/lettuce/configurations/nsec3/nsec3_auth.config
index 618c5ef..5dfffa1 100644
--- a/tests/lettuce/configurations/nsec3/nsec3_auth.config
+++ b/tests/lettuce/configurations/nsec3/nsec3_auth.config
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [
{
@@ -27,7 +27,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth"},
"b10-cmdctl": {"kind": "needed", "special": "cmdctl"}
diff --git a/tests/lettuce/configurations/resolver/resolver_basic.config.orig b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
index 0adca9f..fe5ddd0 100644
--- a/tests/lettuce/configurations/resolver/resolver_basic.config.orig
+++ b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
@@ -1 +1,31 @@
-{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Resolver": {"query_acl": [{"action": "REJECT", "from": "127.0.0.1"}], "listen_on": [{"port": 47806, "address": "127.0.0.1"}]}, "Boss": {"components": {"b10-resolver": {"kind": "needed"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
+{
+ "version": 3,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "*",
+ "debuglevel": 99
+ } ]
+ },
+ "Resolver": {
+ "query_acl": [ {
+ "action": "REJECT",
+ "from": "127.0.0.1"
+ } ],
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "Init": {
+ "components": {
+ "b10-resolver": {
+ "kind": "needed"
+ },
+ "b10-cmdctl": {
+ "kind": "needed",
+ "special": "cmdctl"
+ }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/xfrin/inmem_slave.conf b/tests/lettuce/configurations/xfrin/inmem_slave.conf
index cc1c997..fedf372 100644
--- a/tests/lettuce/configurations/xfrin/inmem_slave.conf
+++ b/tests/lettuce/configurations/xfrin/inmem_slave.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -30,7 +30,7 @@
]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
index c04d917..1b2953d 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
+++ b/tests/lettuce/configurations/xfrin/retransfer_master.conf.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -36,7 +36,7 @@
"Stats": {
"poll-interval": 1
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig
new file mode 100644
index 0000000..bccadf7
--- /dev/null
+++ b/tests/lettuce/configurations/xfrin/retransfer_master_nons.conf.orig
@@ -0,0 +1,48 @@
+{
+ "version": 3,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "*"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org-nons.sqlite3",
+ "listen_on": [ {
+ "address": "::1",
+ "port": 47807
+ } ]
+ },
+ "data_sources": {
+ "classes": {
+ "IN": [{
+ "type": "sqlite3",
+ "params": {
+ "database_file": "data/example.org-nons.sqlite3"
+ }
+ }]
+ }
+ },
+ "Xfrout": {
+ "zone_config": [ {
+ "origin": "example.org"
+ } ],
+ "also_notify": [ {
+ "address": "::1",
+ "port": 47806
+ } ]
+ },
+ "Stats": {
+ "poll-interval": 1
+ },
+ "Init": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig b/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
index cef04cf..2e6b17f 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave.conf.orig
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -24,7 +24,7 @@
}]
}
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf b/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
index d977c58..a5c22b1 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave_notify.conf
@@ -1,5 +1,5 @@
{
- "version": 2,
+ "version": 3,
"Logging": {
"loggers": [ {
"debuglevel": 99,
@@ -37,7 +37,7 @@
"class": "IN"
} ]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": { "kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/lettuce/data/commands/bad_command b/tests/lettuce/data/commands/bad_command
index 95d1694..2daa7cb 100644
--- a/tests/lettuce/data/commands/bad_command
+++ b/tests/lettuce/data/commands/bad_command
@@ -1,8 +1,8 @@
!echo shouldshow
# just add something so the test can verify it's reverted
-config add /Boss/components b10-auth
-config set /Boss/components/b10-auth/kind needed
-config set /Boss/components/b10-auth/special auth
+config add /Init/components b10-auth
+config set /Init/components/b10-auth/kind needed
+config set /Init/components/b10-auth/special auth
bad command
# this should not be reached
!echo shouldnotshow
diff --git a/tests/lettuce/data/example.org-nons.sqlite3 b/tests/lettuce/data/example.org-nons.sqlite3
new file mode 100644
index 0000000..40ddbf6
Binary files /dev/null and b/tests/lettuce/data/example.org-nons.sqlite3 differ
diff --git a/tests/lettuce/features/bindctl_commands.feature b/tests/lettuce/features/bindctl_commands.feature
index 20a28fc..b9fef82 100644
--- a/tests/lettuce/features/bindctl_commands.feature
+++ b/tests/lettuce/features/bindctl_commands.feature
@@ -7,7 +7,7 @@ Feature: control with bindctl
# a number of modules. It then removes all non-essential modules,
# and checks whether they do disappear from the list of running
# modules (note that it 'misuses' the help command for this,
- # there is a Boss command 'show_processes' but it's output is
+ # there is an Init command 'show_processes' but its output is
# currently less standardized than 'help')
Given I have bind10 running with configuration bindctl_commands.config
And wait for bind10 stderr message BIND10_STARTED_CC
@@ -17,9 +17,9 @@ Feature: control with bindctl
And wait for bind10 stderr message XFRIN_STARTED
And wait for bind10 stderr message XFROUT_STARTED
And wait for bind10 stderr message STATS_STARTING
- And wait for bind10 stderr message STATHTTPD_STARTED
+ And wait for bind10 stderr message STATSHTTPD_STARTED
- Then remove bind10 configuration Boss/components/NOSUCHMODULE
+ Then remove bind10 configuration Init/components/NOSUCHMODULE
last bindctl output should contain Error
bind10 module Xfrout should be running
@@ -30,29 +30,29 @@ Feature: control with bindctl
bind10 module StatsHttpd should be running
bind10 module Resolver should not be running
- Then remove bind10 configuration Boss/components value b10-xfrout
+ Then remove bind10 configuration Init/components value b10-xfrout
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
# assuming it won't error for further modules (if it does, the final
# 'should not be running' tests would fail anyway)
- Then remove bind10 configuration Boss/components value b10-stats-httpd
+ Then remove bind10 configuration Init/components value b10-stats-httpd
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-stats
+ Then remove bind10 configuration Init/components value b10-stats
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-zonemgr
+ Then remove bind10 configuration Init/components value b10-zonemgr
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-xfrin
+ Then remove bind10 configuration Init/components value b10-xfrin
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
- Then remove bind10 configuration Boss/components value b10-auth
+ Then remove bind10 configuration Init/components value b10-auth
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
@@ -103,7 +103,7 @@ Feature: control with bindctl
last bindctl output should not contain shouldnotshow
# This would fail if the entire list was passed, or the configuration
# was committed
- send bind10 the command config show Boss/components
+ send bind10 the command config show Init/components
last bindctl output should not contain b10-auth
# nested_command contains another execute script
@@ -124,8 +124,8 @@ Feature: control with bindctl
When I send bind10 the command execute init_authoritative_server show
# just test some parts of the output
- last bindctl output should contain /Boss/components/b10-auth/special
- last bindctl output should contain /Boss/components/b10-zonemgr/kind
+ last bindctl output should contain /Init/components/b10-auth/special
+ last bindctl output should contain /Init/components/b10-zonemgr/kind
last bindctl output should contain Please
# nothing should have been changed
diff --git a/tests/lettuce/features/ddns_system.feature b/tests/lettuce/features/ddns_system.feature
index 8e279a7..184c8ae 100644
--- a/tests/lettuce/features/ddns_system.feature
+++ b/tests/lettuce/features/ddns_system.feature
@@ -48,7 +48,7 @@ Feature: DDNS System
And wait for new bind10 stderr message DDNS_STOPPED
# Test 7
- # BoB should restart it
+ # Init should restart it
And wait for new bind10 stderr message DDNS_STARTED
# Test 8
@@ -65,7 +65,7 @@ Feature: DDNS System
# Test 9
When I send bind10 the command Auth shutdown
And wait for new bind10 stderr message AUTH_SHUTDOWN
- # BoB should restart it automatically
+ # Init should restart it automatically
And wait for new bind10 stderr message AUTH_SERVER_STARTED
# Test 10
diff --git a/tests/lettuce/features/default.feature b/tests/lettuce/features/default.feature
index ce7ee1e..bd81f12 100644
--- a/tests/lettuce/features/default.feature
+++ b/tests/lettuce/features/default.feature
@@ -8,7 +8,7 @@ Feature: default bind10 config
And wait for bind10 stderr message STATS_STARTING
# These should be running
- bind10 module Boss should be running
+ bind10 module Init should be running
And bind10 module Logging should be running
And bind10 module Stats should be running
diff --git a/tests/lettuce/features/multi_instance.feature b/tests/lettuce/features/multi_instance.feature
index 4ce135a..3ab06eb 100644
--- a/tests/lettuce/features/multi_instance.feature
+++ b/tests/lettuce/features/multi_instance.feature
@@ -34,7 +34,7 @@ Feature: Multiple instances
If I remember the pid of process b10-auth
And remember the pid of process b10-auth-2
- When I remove bind10 configuration Boss/components value b10-auth-2
+ When I remove bind10 configuration Init/components value b10-auth-2
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth should not have changed
@@ -42,9 +42,9 @@ Feature: Multiple instances
When I send bind10 the following commands
"""
- config add Boss/components b10-auth-2
- config set Boss/components/b10-auth-2/special auth
- config set Boss/components/b10-auth-2/kind needed
+ config add Init/components b10-auth-2
+ config set Init/components/b10-auth-2/special auth
+ config set Init/components/b10-auth-2/kind needed
config commit
"""
And wait for new bind10 stderr message AUTH_SERVER_STARTED
@@ -53,7 +53,7 @@ Feature: Multiple instances
Then the pid of process b10-auth should not have changed
A query for example.com should have rcode REFUSED
- When I remove bind10 configuration Boss/components value b10-auth
+ When I remove bind10 configuration Init/components value b10-auth
And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth-2 should not have changed
A query for example.com should have rcode REFUSED
diff --git a/tests/lettuce/features/stats_httpd.feature b/tests/lettuce/features/stats_httpd.feature
new file mode 100644
index 0000000..50d5efa
--- /dev/null
+++ b/tests/lettuce/features/stats_httpd.feature
@@ -0,0 +1,16 @@
+Feature: b10-stats-httpd module
+ Tests the stats httpd module
+
+ Scenario: Check that the module logs and responds to requests
+ Given I have bind10 running with configuration bindctl_commands.config
+ And wait for bind10 stderr message STATSHTTPD_STARTED
+
+ When I request the URL http://127.0.0.1:47811/
+ # Should result in redirect, so two query logs
+ And wait for new bind10 stderr message "GET / HTTP/1.0" 302 -
+ And wait for new bind10 stderr message "GET /bind10/statistics/xml/ HTTP/1.0" 200 -
+ The last http response status code should be 200
+
+ When I request the URL http://127.0.0.1:47811/no_such_url
+ And wait for new bind10 stderr message "GET /no_such_url HTTP/1.0" 404 -
+ The last http response status code should be 404
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index 142a78e..7cc7d3e 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -202,28 +202,28 @@ def parse_bindctl_output_as_data_structure():
"parseable data structure: '" + output + "': " + str(ve)
def find_process_pid(step, process_name):
- """Helper function to request the running processes from Boss, and
+ """Helper function to request the running processes from Init, and
return the pid of the process with the given process_name.
- Fails with an assert if the response from boss is not valid JSON,
+ Fails with an assert if the response from b10-init is not valid JSON,
or if the process with the given name is not found.
"""
# show_processes output is a list of lists, where the inner lists
# are of the form [ pid, "name" ]
# Not checking data form; errors will show anyway (if these turn
# out to be too vague, we can change this)
- step.given('send bind10 the command Boss show_processes')
+ step.given('send bind10 the command Init show_processes')
running_processes = parse_bindctl_output_as_data_structure()
for process in running_processes:
if process[1] == process_name:
return process[0]
assert False, "Process named " + process_name +\
- " not found in output of Boss show_processes";
+ " not found in output of Init show_processes";
@step("remember the pid of process ([\S]+)")
def remember_pid(step, process_name):
"""Stores the PID of the process with the given name as returned by
- Boss show_processes command.
+ Init show_processes command.
Fails if the process with the given name does not appear to exist.
Stores the component_name->pid value in the dict world.process_pids.
This should only be used by the related step
@@ -239,7 +239,7 @@ def remember_pid(step, process_name):
@step('pid of process ([\S]+) should not have changed')
def check_pid(step, process_name):
"""Checks the PID of the process with the given name as returned by
- Boss show_processes command.
+ Init show_processes command.
Fails if the process with the given name does not appear to exist.
Fails if the process with the given name exists, but has a different
pid than it had when the step 'remember the pid of process' was
@@ -343,9 +343,9 @@ def configure_ddns_on(step):
step.behave_as("""
When I send bind10 the following commands
\"\"\"
- config add Boss/components b10-ddns
- config set Boss/components/b10-ddns/kind dispensable
- config set Boss/components/b10-ddns/address DDNS
+ config add Init/components b10-ddns
+ config set Init/components/b10-ddns/kind dispensable
+ config set Init/components/b10-ddns/address DDNS
config commit
\"\"\"
""")
@@ -358,7 +358,7 @@ def configure_ddns_off(step):
step.behave_as("""
When I send bind10 the following commands
\"\"\"
- config remove Boss/components b10-ddns
+ config remove Init/components b10-ddns
config commit
\"\"\"
""")
diff --git a/tests/lettuce/features/terrain/http.py b/tests/lettuce/features/terrain/http.py
new file mode 100644
index 0000000..f1f72f5
--- /dev/null
+++ b/tests/lettuce/features/terrain/http.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2013 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from lettuce import *
+import urllib
+
+# Basic request
+@step('request the URL (.*)')
+def request_url(step, url):
+ """
+ Performs one basic HTTP GET request. The resulting HTTPResponse object
+ will be placed in world.last_http_response
+ Parameters:
+ url: the full URL to query
+ """
+ world.last_http_response = urllib.urlopen(url)
+
+@step('last http response status code should be ([0-9]+)')
+def check_last_response_code(step, code):
+ """
+ Checks whether the last call to request_url resulted in a response
+ with the given (numeric) status code
+ Fails if it does not, or if there never was a complete request_url
+ operation
+ """
+ assert world.last_http_response != None, "No HTTP request made yet"
+ assert int(code) == world.last_http_response.getcode(),\
+ code + " != " +\
+ str(world.last_http_response.getcode())
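The two steps in this new terrain/http.py module are thin wrappers around urllib from the Python 2 standard library, so the checks in stats_httpd.feature can be reproduced by hand. A rough equivalent, assuming b10-stats-httpd is listening on the port used in that feature (47811):

    import urllib

    # urllib.urlopen() follows the 302 redirect from "/", so the final
    # status code seen here should be 200, as the feature expects.
    response = urllib.urlopen("http://127.0.0.1:47811/")
    assert response.getcode() == 200, "unexpected status %d" % response.getcode()

    # A nonexistent path should yield a 404 without raising an exception.
    response = urllib.urlopen("http://127.0.0.1:47811/no_such_url")
    assert response.getcode() == 404, "unexpected status %d" % response.getcode()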
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
index 8df0bae..e470acf 100644
--- a/tests/lettuce/features/terrain/steps.py
+++ b/tests/lettuce/features/terrain/steps.py
@@ -30,7 +30,7 @@ def stop_a_named_process(step, process_name):
"""
world.processes.stop_process(process_name)
-@step('wait (?:(\d+) times )?for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
+@step('wait (?:(\d+) times )?for (new )?(\w+) stderr message (\S+)(?: not (\S+))?')
def wait_for_stderr_message(step, times, new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stderr
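The regex change above widens the message pattern of the "wait ... for stderr message" step from \w+ to \S+; the quoted HTTP request lines that stats_httpd.feature now waits for begin with a double quote, which the old \w+ group cannot match. A small stand-alone illustration of the difference:

    import re

    # \w+ only matches word characters, so it rejects a token that starts
    # with '"'; \S+ accepts any run of non-whitespace characters.
    print re.match(r'\w+', '"GET')             # None
    print re.match(r'\S+', '"GET').group(0)    # "GET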
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
index bc05341..ce7426b 100644
--- a/tests/lettuce/features/terrain/terrain.py
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -1,3 +1,4 @@
+
# Copyright (C) 2011 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
@@ -63,6 +64,8 @@ copylist = [
"configurations/ddns/noddns.config"],
["configurations/xfrin/retransfer_master.conf.orig",
"configurations/xfrin/retransfer_master.conf"],
+ ["configurations/xfrin/retransfer_master_nons.conf.orig",
+ "configurations/xfrin/retransfer_master_nons.conf"],
["configurations/xfrin/retransfer_slave.conf.orig",
"configurations/xfrin/retransfer_slave.conf"],
["data/inmem-xfrin.sqlite3.orig",
@@ -83,7 +86,7 @@ removelist = [
# If we have waited OUTPUT_WAIT_MAX_INTERVALS times, we will abort with an
# error (so as not to hang indefinitely)
OUTPUT_WAIT_INTERVAL = 0.5
-OUTPUT_WAIT_MAX_INTERVALS = 20
+OUTPUT_WAIT_MAX_INTERVALS = 120
# class that keeps track of one running process and the files
# we created for it.
@@ -380,6 +383,9 @@ def initialize(scenario):
# Convenience variable to access the last query result from querying.py
world.last_query_result = None
+ # Convenience variable to access the last HTTP response from http.py
+ world.last_http_response = None
+
# For slightly better errors, initialize a process_pids for the relevant
# steps
world.process_pids = None
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
index 34674ca..7ba1ca0 100644
--- a/tests/lettuce/features/xfrin_bind10.feature
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -25,6 +25,13 @@ Feature: Xfrin
A query for www.example.org to [::1]:47806 should have rcode REFUSED
When I send bind10 the command Xfrin retransfer example.org IN ::1 47807
+ # The data we receive contain a NS RRset that refers to three names in the
+ # example.org. zone. All these three are nonexistent in the data, producing
+ # 3 separate warning messages in the log.
+ And wait for new bind10 stderr message XFRIN_ZONE_WARN
+ And wait for new bind10 stderr message XFRIN_ZONE_WARN
+ And wait for new bind10 stderr message XFRIN_ZONE_WARN
+ # But after complaining, the zone data should be accepted.
Then wait for new bind10 stderr message XFRIN_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
Then wait for new bind10 stderr message ZONEMGR_RECEIVE_XFRIN_SUCCESS
A query for www.example.org to [::1]:47806 should have rcode NOERROR
@@ -38,7 +45,20 @@ Feature: Xfrin
When I do an AXFR transfer of example.org
Then transfer result should have 13 rrs
-
+ # Now try to offer another update. However, the validation of
+ # data should fail. The old version should still be available.
+ When I send bind10 the following commands with cmdctl port 47804:
+ """
+ config set data_sources/classes/IN[0]/params/database_file data/example.org-nons.sqlite3
+ config set Auth/database_file data/example.org-nons.sqlite3
+ config commit
+ """
+ Then I send bind10 the command Xfrin retransfer example.org IN ::1 47807
+ And wait for new bind10 stderr message XFRIN_ZONE_INVALID
+ And wait for new bind10 stderr message XFRIN_INVALID_ZONE_DATA
+ Then wait for new bind10 stderr message ZONEMGR_RECEIVE_XFRIN_FAILED
+ A query for example.org type NS to [::1]:47806 should have rcode NOERROR
+ And transfer result should have 13 rrs
Scenario: Transfer with TSIG
# Similar setup to the test above, but this time, we add TSIG configuration
@@ -74,7 +94,7 @@ Feature: Xfrin
# Transfer should fail
When I send bind10 the command Xfrin retransfer example.org
- Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_PROTOCOL_ERROR not XFRIN_TRANSFER_SUCCESS
+ Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_PROTOCOL_VIOLATION not XFRIN_TRANSFER_SUCCESS
# Set client to use TSIG as well
When I send bind10 the following commands:
"""
@@ -86,3 +106,41 @@ Feature: Xfrin
# Transfer should succeed now
When I send bind10 the command Xfrin retransfer example.org
Then wait for new bind10 stderr message XFRIN_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
+
+ Scenario: Validation fails
+ # In this test, the source data of the XFR is invalid (missing NS record
+ # at the origin). We check it is rejected after the transfer.
+ #
+ # We abuse the fact that we do not check data when we read it from
+ # the sqlite3 database (unless we load into in-memory, which we don't
+ # do here).
+ The file data/test_nonexistent_db.sqlite3 should not exist
+
+ Given I have bind10 running with configuration xfrin/retransfer_master_nons.conf with cmdctl port 47804 as master
+ And wait for master stderr message BIND10_STARTED_CC
+ And wait for master stderr message CMDCTL_STARTED
+ And wait for master stderr message AUTH_SERVER_STARTED
+ And wait for master stderr message XFROUT_STARTED
+ And wait for master stderr message ZONEMGR_STARTED
+
+ And I have bind10 running with configuration xfrin/retransfer_slave.conf
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+ And wait for bind10 stderr message XFRIN_STARTED
+ And wait for bind10 stderr message ZONEMGR_STARTED
+
+ # Now we use the first step again to see if the file has been created
+ The file data/test_nonexistent_db.sqlite3 should exist
+
+ A query for www.example.org to [::1]:47806 should have rcode REFUSED
+ When I send bind10 the command Xfrin retransfer example.org IN ::1 47807
+ # It should complain once about invalid data, then again that the whole
+ # zone is invalid and then reject it.
+ And wait for new bind10 stderr message XFRIN_ZONE_INVALID
+ And wait for new bind10 stderr message XFRIN_INVALID_ZONE_DATA
+ Then wait for new bind10 stderr message ZONEMGR_RECEIVE_XFRIN_FAILED
+ # The zone still doesn't exist as it is rejected.
+ # FIXME: This step fails. Probably an empty zone is created in the data
+ # source :-|. This should be REFUSED, not SERVFAIL.
+ A query for www.example.org to [::1]:47806 should have rcode SERVFAIL
diff --git a/tests/lettuce/features/xfrin_notify_handling.feature b/tests/lettuce/features/xfrin_notify_handling.feature
index 80a8873..03b18c1 100644
--- a/tests/lettuce/features/xfrin_notify_handling.feature
+++ b/tests/lettuce/features/xfrin_notify_handling.feature
@@ -123,7 +123,7 @@ Feature: Xfrin incoming notify handling
Then wait for new bind10 stderr message AUTH_RECEIVED_NOTIFY
Then wait for new bind10 stderr message ZONEMGR_RECEIVE_NOTIFY
Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_STARTED
- Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_PROTOCOL_ERROR not XFRIN_XFR_TRANSFER_STARTED
+ Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_PROTOCOL_VIOLATION not XFRIN_XFR_TRANSFER_STARTED
Then wait for new bind10 stderr message ZONEMGR_RECEIVE_XFRIN_FAILED not ZONEMGR_RECEIVE_XFRIN_SUCCESS
Then wait 5 times for new master stderr message NOTIFY_OUT_SENDING_NOTIFY
Then wait for new master stderr message NOTIFY_OUT_RETRY_EXCEEDED
diff --git a/tests/system/Makefile.am b/tests/system/Makefile.am
index 663258b..aed1d79 100644
--- a/tests/system/Makefile.am
+++ b/tests/system/Makefile.am
@@ -6,7 +6,7 @@ distclean-local:
# Most of the files under this directory (including test subdirectories)
# must be listed in EXTRA_DIST.
-EXTRA_DIST = README cleanall.sh ifconfig.sh start.pl stop.pl run.sh runall.sh
+EXTRA_DIST = README cleanall.sh ifconfig.sh start.pl stop.pl runall.sh
EXTRA_DIST += common/default_user.csv
EXTRA_DIST += glue/auth.good glue/example.good glue/noglue.good glue/test.good
EXTRA_DIST += glue/tests.sh glue/clean.sh
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 75c91de..ca58240 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -38,8 +38,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Starting b10-auth and checking that it works ($n)"
-echo 'config add Boss/components b10-auth
-config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
+echo 'config add Init/components b10-auth
+config set Init/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -68,7 +68,7 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config remove Boss/components b10-auth
+echo 'config remove Init/components b10-auth
config commit
quit
' | $RUN_BINDCTL \
@@ -79,8 +79,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config add Boss/components b10-auth
-config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
+echo 'config add Init/components b10-auth
+config set Init/components/b10-auth { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -143,8 +143,8 @@ n=`expr $n + 1`
echo "I:Starting more b10-auths and checking that ($n)"
for i in 2 3
do
- echo 'config add Boss/components b10-auth-'$i'
-config set Boss/components/b10-auth-'$i' { "special": "auth", "kind": "needed" }
+ echo 'config add Init/components b10-auth-'$i'
+config set Init/components/b10-auth-'$i' { "special": "auth", "kind": "needed" }
config commit
quit
' | $RUN_BINDCTL \
@@ -180,7 +180,7 @@ n=`expr $n + 1`
echo "I:Stopping extra b10-auths and checking that ($n)"
for i in 3 2
do
- echo 'config remove Boss/components b10-auth-'$i'
+ echo 'config remove Init/components b10-auth-'$i'
config commit
quit
' | $RUN_BINDCTL \
diff --git a/tests/system/glue/nsx1/b10-config.db.in b/tests/system/glue/nsx1/b10-config.db.in
index 5f93f3b..6802c53 100644
--- a/tests/system/glue/nsx1/b10-config.db.in
+++ b/tests/system/glue/nsx1/b10-config.db.in
@@ -23,7 +23,7 @@
}
]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tests/system/ixfr/b10-config.db.in b/tests/system/ixfr/b10-config.db.in
index b3b27a4..a36117d 100644
--- a/tests/system/ixfr/b10-config.db.in
+++ b/tests/system/ixfr/b10-config.db.in
@@ -38,7 +38,7 @@
"class": "IN"
}]
},
- "Boss": {
+ "Init": {
"components": {
"b10-auth": {"kind": "needed", "special": "auth" },
"b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
diff --git a/tools/query_cmp/src/lib/handledns.py b/tools/query_cmp/src/lib/handledns.py
index e33ce9e..e906bae 100755
--- a/tools/query_cmp/src/lib/handledns.py
+++ b/tools/query_cmp/src/lib/handledns.py
@@ -187,7 +187,7 @@ def send_req(query, server, port=53, timeout=5):
msg = Message(Message.RENDER)
msg.set_qid(int(qheader['id']))
- msg.set_opcode(Opcode.QUERY())
+ msg.set_opcode(Opcode.QUERY)
msg.set_rcode(Rcode(int(qheader['rcode'])))
if qheader['qr'] == 1:
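The final hunk tracks an API change in the Python DNS bindings this tool uses: Opcode.QUERY is now a constant attribute rather than a method to call. A minimal sketch of the new usage, assuming the isc.dns module that provides Message and Opcode here:

    from isc.dns import Message, Opcode

    msg = Message(Message.RENDER)
    # Opcode.QUERY is accessed as an attribute; the old Opcode.QUERY() call
    # form is what this hunk removes.
    msg.set_opcode(Opcode.QUERY)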