BIND 10 trac1704, updated. 149ac337e9a8e43896e84268a462463429061aed [master] added boss configuration to the newly added lettuce tests with #1792.
BIND 10 source code commits
bind10-changes at lists.isc.org
Tue Apr 17 08:35:27 UTC 2012
The branch, trac1704, has been updated
via 149ac337e9a8e43896e84268a462463429061aed (commit)
via 93f11d2a96ce4dba9308889bdb9be6be4a765b27 (commit)
via a7dbb75a88c71513c2ff2d0c86eb98b1f287e6f4 (commit)
via 952cf4c1d92cf00179347abbc15fa37318c5ec90 (commit)
via 277af902ee91a90c1ef8ebce3895ecc27d8dec2d (commit)
via 56bd002f57d397ec4c1fffb40c499425e36b21de (commit)
via 4653281fb863709e2ab9ca26514fe9e31cd8444d (commit)
via 0f9b1d45d37223a5e1f7167a97c9760b52e744e9 (commit)
via d3fd5c7fa44e0f19a97d2dcb99ee937eaa80d704 (commit)
via ae46e78f9e1d4261bbf86a3dcaf7bd813851edf0 (commit)
via 0464833becda85dca7deae50a7f4adbf9589076f (commit)
via 2c8983c96edd2e98f285d35343a5e4958fb1971c (commit)
via c89bd8e38d3fcad506676876cc110c937cc3fbfe (commit)
via 22041b8ed4dd46d27981c3e645973de62c34a25e (commit)
via 66a9862c8ec60bd4c64f1535590ac16fae2fd088 (commit)
via 2fd8732e2b16ad5dffcbf12345e97a05b146c592 (commit)
via 5a3ba49ea9cdd4cde08d1855a0944f6899eaae8b (commit)
via efc43a67d27a14c468d3cf53df87ed3f808fd730 (commit)
via d833b85d17a2958890faa25927c1d1ed3443279c (commit)
via 3e27e1508a29251494a907a6b00258900a5200b2 (commit)
via 412304add5726cd16f91c010471fd895d65c2a6d (commit)
via a2da847353f3fd79feb1a4b902faebd74b31fa64 (commit)
via 8b8dbb612ba7f55ae49d113297a8e119c51f492d (commit)
via 837533a24607e825f2dc72f7fb45665c4e8aec99 (commit)
via c545fba97414013e52eda55f2d61ba23f9e076e9 (commit)
via 2e25759b8ff76ff81d75201bc4c8ef6eb243d144 (commit)
via 0a9dad718d84b7de43478df6804da1da25e5dab9 (commit)
via ff240f0bc7b13b7932054f435b7159f788eaf912 (commit)
via 7544617e812757e1d1f02ebf9d1c44e1fd01a4df (commit)
via c537d66e97d1a10a544baeba5e5490bd061f1590 (commit)
via cf96b2ad155c63f0ffceaf69b634e5f772d22770 (commit)
via 92c00e5601f7816113eab6be8a165cca0fe6956e (commit)
via 8cdf67b6ff82596aa51e56fd5eccbd75763e9011 (commit)
via 69bcd5348b07321645cbd749dc964d96bf654076 (commit)
via a83554ce6da992e5d060ceb12baa6948c6e4bf33 (commit)
via 9cc9ffd9f10433f85a6b056d7face686b5522a13 (commit)
via c3b46d0b46f88f759c7a4738040e6edaf6845d5b (commit)
via 034bc4e02eb482913c2e2c0892d5cf10e5525aae (commit)
via b303b434d956c7b5842fc0ca5c43f4aa69351a34 (commit)
via b7f87132783143fab9f8f6ecc0977fe220f2e58e (commit)
via 22aa6eec949ced4a40a599019a275c7e732cbb70 (commit)
via b23c36ef2e61cb4133f8c2708aa8cf85e4809d61 (commit)
via a17f42a08fa2d0fbd2de357d90dbbfeaaba32335 (commit)
via 0d2ebae0cc3b6119ae0cc2f7fa509a5792290ecf (commit)
via 325b81328b624d467c674a7bf74e73a271892d4a (commit)
via be50e95723a83adcb63e9622eaada199f3f17463 (commit)
via 31de885ba0409f54d9a1615eff5a4b03ed420393 (commit)
via 29264296474f103f8ece3e299171f835b81d9e9a (commit)
via 65e64487c28d3688d65ed625cdb1531ce5691095 (commit)
via 4108ca766cb9bce434bfbb786436b7c08adb87a4 (commit)
via de9ccf5df44c42526b2cd8e7f078d6073729c35a (commit)
via 7efc7ddb385ef56c5df5a23474a10900bbe36cf9 (commit)
via fcdc7a7b7fbf23338d1c7165e6f3a8ffe4a20459 (commit)
via e0bcbb981c3b22f3380fcee592ae7cc2dadd0c00 (commit)
via 57512ac60b298b3873b9ee74d46f4f56ab746494 (commit)
via 00a36e752802df3cc683023d256687bf222e256a (commit)
via 5dcf1dadd19d9b251fa6dbefefbc98f16fb62c66 (commit)
via 7c75154f52853de902bfd7c880b6e16fe3a79c0f (commit)
via d127560b02816a7ba12869d19ca51a30e695ff35 (commit)
via 793772f302013324ee4964e5b2c3439c0eed2221 (commit)
via be2b8d67e266598e0fba9e658042986c6833d220 (commit)
via 6cd82ad2a63dfc9bcd15dabdf650da242eaf924b (commit)
via 493f952e6937adf7045732a2f7e0c4a4313fc0a5 (commit)
via 9b6993002b4ba9019551e50613c8a2c6c7ff9fec (commit)
via d8b8e46b853f13d3e9ef2857f0fce424f1876ef5 (commit)
via 7116ee3c764180aad581e52b36dc67124b7d72f0 (commit)
via fb2317550507bb357cba2eee89c3a469f1a89803 (commit)
via 291d0cbfdc964c1d60542edbe9f442cc383657cd (commit)
via 52e971851f0c7ee8f45c511d810497e3c038dc71 (commit)
via 07274f662a772c856f0bf80213b246e689582409 (commit)
via c3bc4e02519d15e27b8e32291bda1a59ed08f42b (commit)
via b77375be27718eea1619f4e4fdb4899a29eea18e (commit)
via 05793b5a18793908b17190008382a27e133e5979 (commit)
via 186bacfc7ba324647f6689fd627c8d9d2d724c0c (commit)
via 058af3dc4b9f1e03d46c549f3ea848fb1a5c7960 (commit)
via c47c4c3541a5a9ed7a77f47610d8f14c29295969 (commit)
via 7130e28820b2e9e603f64546deebf12b410b897d (commit)
via a29df11575ac9b1aa036ab212b49f4706ba1e607 (commit)
via 56338ac70f174880ff1ca73bda0afe73dad2e7d4 (commit)
via 2d99288b3400b01e3eb1402717a182f5e828c7b7 (commit)
via 52ee8a28742a20415742651cbdf7982387050641 (commit)
via 33f9ea32c67a6e5e7df816432c98f3c9772b0b0f (commit)
via ed4c07d55c90e871d0c2b8ce571273cf83740e66 (commit)
via 283053c1a3dd96ff3811f25361f900dd7c9d97ef (commit)
via f173bdf07ec3f4d099176dcc6b7f878b218fb93a (commit)
via 7ce82914391a42fde56946b284f386bf0f3fe169 (commit)
via 5c9b7307a56c8b578be2b5b1ad73799b899a2e93 (commit)
via 77b918b70668c7755f3b7fb8335f5fc1f9f119a6 (commit)
via 0c0e8a5f4ddafbd7724545c08bc813c70f360faa (commit)
via 4eeff0e79de122645ffd3bf117a1486147fc9541 (commit)
via a98fc15799b0d8898ab3b5071ba55dd8935d45dc (commit)
via 94077743ff440dfbcfd4a723f3fd676acbfbefe4 (commit)
via b407617f16ae9d672ef0e7812a8eb9e18509b6e6 (commit)
via 76f364b152f174c57982f294f97274a212684121 (commit)
via d12134b3a9f135f9ae4317587eef31f45e6c0454 (commit)
via 55cecba6f3c43d725fc7c1614e5b47bc7729d5ec (commit)
via 7ef140c77fa2ddd4194ffdc344781a55232d68b3 (commit)
via 7a0dc75cee48aceeb218147331ddc81a05376358 (commit)
via 61446fd4004c89a7d568988776a0fc2c1b67046a (commit)
via 9ad569a0f568d9faf5f213f1403dce51cbcf08b8 (commit)
via 9b060a79e29c1691d1e84da5d6a656e99f2ec8a2 (commit)
via 501c5f296f27b565b6e2229e62f8f19754818abf (commit)
via 99adadb0496046011060ec22e6834dfede9f1ce3 (commit)
via e267a6928ace0651795f188f43a6a5a85c479d44 (commit)
via 08d13321640259a5036053852ff0c8731b54239c (commit)
via 7610a7bacea56b07ae06975ee771d010846addd2 (commit)
via 0996aa92a103e7af59bf1ae9c501f5feb2a5894c (commit)
via f1f0bc00441057e7050241415ee0367a09c35032 (commit)
via fb9b8cd9f1200dcd6d7146a45fdbfd2c7675be56 (commit)
via 16a1358aa2960039e192ac66d504150a55b374fe (commit)
via c02b9f4c1f0970cd50c5486f739dd750046ae97b (commit)
via aaf4fb3d70f5868ba9c6fa16c490abd7ed5035d3 (commit)
via 2397c86b17e71f1472a867dfa1fb4e7223cb811c (commit)
via 2fa92e23966279921c57bd1fa67ec79e56b328ed (commit)
via 9858d6dd9af2640765266d6b99056e93dc368277 (commit)
via 654f0cbd1850a78758194cf3dcc7d5a90d37e495 (commit)
via dcdd4e10f22f795754f91805b741988ef8a389c2 (commit)
via 68308bd95eb0a3d52f048790cd16611b5fdc7ee9 (commit)
via d5531c9856c5bb85f63d2e3168fc8b08c9700418 (commit)
via ea528f50fddfb83618aa338bdd4607791fd788ef (commit)
via b4466188150a50872bc3c426242bc7bba4c5f38d (commit)
via 4df912c903ef7469b0034f1750de4cf0711e9e2d (commit)
via 81300648c7274effe356e4ba421cdf3cdab8d316 (commit)
via 97389bad9bf82cd32329407557aed4ea669401f4 (commit)
via 8f3e82a987df1e5abc1ab2d4f0785ca3f9ab97ff (commit)
via 44d2850a0d3e96ad2b405aadb222a975c0462826 (commit)
via e663cfecd5385a0d2e27c301f089e1f60d3e9a28 (commit)
via efb79bb638d7c88d3f46c9486d7f29f2d2e6e8df (commit)
via 2645b194340a5a7fe315505f6d4cab874573c2ae (commit)
via 00bc99c5a2c8412994dd67b0e01318abb2bde893 (commit)
via f48ddb14605e0ba0bf226ab074dc881e4a782a9b (commit)
via cf16578b3ecf2da1b38a724107dfaa802b03339c (commit)
via 635e3dae01973485a2d81487fb684d40c8db930a (commit)
via ceaa247d89ac7d97594572bc17f005144c5efb8d (commit)
via dddc267e555071b02a90623758f947d18c46356c (commit)
via d5e250eff9f9c7996f16eebfc6f4bb7e0dd02aee (commit)
via 923c859c0ff6fcfc3d0c268cd20d493b71e66d29 (commit)
via 7f720276658e694a1edbb8ce105b04a8dcf1d36c (commit)
via 8644866497053f91ada4e99abe444d7876ed00ff (commit)
via 36ec15a2721f408c1868001f8dfa4c358f2f0ec3 (commit)
via c30d4ccd7f07819b60695d11dd5f946b61e8fe17 (commit)
via 8a33648413f84e7a7be65db8890d217d91f7c8f6 (commit)
via 49c83cc4d393dd69453f4fc43cd7d801e03300ef (commit)
via f4489fa39cb50a2d0a80925ac6709d4c2beffc29 (commit)
via 350e65820efee213ef09122b88cdfdb7f4ab38b0 (commit)
via 8ae80ada66174a7748d0ac5a02d9fbe6f443303f (commit)
via fe9349ae3a6f283089af635b2453f3b7a6c66ac6 (commit)
via d9ede029ea8224a62edbdb2dc890a14e068a870c (commit)
via 16b92d014e713933d591ebde9cbc4540044ce8fe (commit)
via 365b5c443aa170831bc1d6a40b0e3323192fb532 (commit)
via fa6a9dbe36ffc55bcadf54a523bedcfb7118474c (commit)
via 3b09268518e4e90032218083bcfebf7821be7bd5 (commit)
via daf2b85fc21361544855324799f2d3af777044e4 (commit)
via 098da24dddad497810aa2787f54126488bb1095c (commit)
via 69e76e0edb5a28773bd8e29d82902c4dbbb412ad (commit)
via 9e77d650e31a106518ad263092f6a7e2a69d22a1 (commit)
via 1eb1ca43bb8682d49c2396395919d8445a28c012 (commit)
via a3f1538ebc5aa4b0a6ee49fee142646ff3bfee2c (commit)
via 297646b5cd8cee65d80ef74d07cc7a3b4890d555 (commit)
via cc73f7e20f8534b0986bcc9a41e99511109546d7 (commit)
via 1c45a4e0bc84ab2d3e006ceb63ce18b9c32adf53 (commit)
via 80c297d9ae634e44c510884ede894c6737941820 (commit)
via 7a74c2d58e76b8fcc9aea301bd4ea42a0269aca8 (commit)
via bbe403303f87ef541114aceaa0aba02d2df714b4 (commit)
via 698534e5564fb14a384d1cf28eda058453dcb1a5 (commit)
via 947f08c755bd91255b2f8034a65b99445f13b024 (commit)
via 4df83ec7d1b92d7fb8c6fd2c0906694683da8c30 (commit)
via 9ee14145b2879b044fdf0c32678838ee67343094 (commit)
via 6e57b76919731d8f5a15d21d2c008336362f44c9 (commit)
via a29f99f27df0d39ad097b393dff0ef12f6e89388 (commit)
via 7733a51887b6370c92cfb02a2446b10d15e224dc (commit)
via 66300a3c4769a48b765f70e2d0dbf8bbb714435b (commit)
via 952cb2ceeb8e23145a18839d42b12641e520ecdb (commit)
via 318ecd4e65de22d957ed55f571e868ceb3152f97 (commit)
via 9a56eb7ebbefcaa29ea623bbf8d7102b6970ff70 (commit)
via ead53d5c0f105d4f60d451734d66c8f202cbbd15 (commit)
via c31cfdf8449030f874406f4efad754ba7eb786e6 (commit)
via cd6faa250e082808a86d9ccb1f11ea07ab81a618 (commit)
via 5893305969aec78850e2462859b3bf4b7a157057 (commit)
via 419665026524f1a1b46efba377d41bd1f7f806d0 (commit)
via ad0ce258df14fa88a299ef37238d4c2527f273c8 (commit)
via fa4aa9c87c599a985bb19b78ae3f2d1d4ab9bb63 (commit)
via 70515e4857db49a5d2cec93f00839ce2cf1ea52d (commit)
via 65b293e59954c95c245f44691f5c82fdcd6579c9 (commit)
via 532aa507e57aaa8c406cd4cc537058de6fcb5a88 (commit)
via 94793e41d922cb10e35e0ff146b19c38ace415b1 (commit)
via c1a0b11d84b6953c0c53bfe26d7c1a946bb772fb (commit)
via 737c4eb744e7edcd77410a11000b49e17ca3af8b (commit)
via 1a4d0ae65b2c1012611f4c15c5e7a29d65339104 (commit)
via f95e202d7e972f92ebf5ee9ca11c4ad846cd1524 (commit)
via 6ac392ce798a63a024a2890e3838df8a15a233d0 (commit)
via 376c01fc1b6114e6717a9f515999dd1bf67db39a (commit)
via 9d192aa44e2cb1495e78b70ad5af23c21acfe34d (commit)
via 88a80126ce73428584f8f36657cb159e581c700e (commit)
via 77eddb37c51caf6efb688a27aa60ec15fd0d3535 (commit)
via 252fc353bab9ae9eac99cc5c54b66b6e493d91b3 (commit)
via 1a46994e855e3a2a0b5b407df94e52ada3efee7e (commit)
via c80eae98a8dd78a2c0c711f642a02a4b24a3a819 (commit)
via ae9eae29057da3aa8c585b86b662967801ed8bb2 (commit)
via 48cd72d455b5bd1298485f58fc7dcf82c7e4c3cf (commit)
via 06280cc6b480f6976f25ccf8b9cedbdf7aa00e34 (commit)
via af354fd9eb8390cb388348ad66a4d8269b945ac3 (commit)
via 48cfa00bb988cfeebab4e6ac7ae440cd685137e5 (commit)
via 16ad60fe0d9ba8cf53fc6a190ad25d92ece8e8bd (commit)
via 9aa4dd78f5cc00acb5ecc4fdc5ba1beb423aa40d (commit)
via 61cdd0f353e20153706af04ac884a4375386648c (commit)
via eee654360f42459c1930f83e78f901cd4e9170f7 (commit)
via 6cce379ad4e562fae0e899b2134f59f947d54f3e (commit)
via 8ef09a5063e52e52e1192f81f001c186a32aa18d (commit)
via 5b0ea09b5f533d2cc8c572693d882061b5d55e44 (commit)
via 52f1b58737e7c7e276cc7ee7daabf2f2e2d7f2a8 (commit)
via 6736c51047cfe2ac2c4678299b5b768ba3585db0 (commit)
via 3104e910b9a4753d2bf4025d59f0f1a13f01e519 (commit)
via 01d11e9fe77f1ea869aa1a0e800a965dcc638050 (commit)
via a3ccc5c6476526739e363d3aafbd2e497cee069b (commit)
via 07403ec675bf43e0936aea2ab5d4f5d903770f13 (commit)
via ce81fab6415ae7f50ed29c201162e912d41eb50d (commit)
via 061f5a1ae10248ac161b9c5a489eb0405b2c8426 (commit)
via 9f4292fa5d27c5569384ca57b7878be69cbf5499 (commit)
via 5a4cf2dd5915ed8f499570faa1005462396b233b (commit)
via fe5549bb7e546732e338db24abb7ad571f34f668 (commit)
via 63e4fc15cc2c66b07168bb15e2e6af464c235a3d (commit)
via 2df61b4c21596ca9b1f9e2e2364d6ed352c2eca0 (commit)
via 6a9dbeff7d28a8ef91ceddff5dbcdef9a7d1452e (commit)
via 5ef450d431113ab2aa349d6200f7f69f55e975f4 (commit)
via f8c0d29e25d71f77314ffc997e678c7b24c34ca5 (commit)
via 34da803fd804c6c0fa7acb90ff46780df69d5898 (commit)
via 6db3556488ae8a75f5250a175f3507cc609ada19 (commit)
via f07fcf3c8e3f28d97772994f26520ac20436eba2 (commit)
via c9337b1576d127bfdf11ed0e87db9c6cc1f98a09 (commit)
via 342f5b2ae66112e9416da86436b7932ea8e6f9c4 (commit)
via f7ae819d576cd95231c89c269654e23afd16aeff (commit)
via 5b0ef5ce8137711c67bc9ce0e4561b7adc2ebcc2 (commit)
via 3a3f1942d99e404b9e8a7b83f6688fb8b8518afb (commit)
via 434d8db8dfcd23a87b8e798e5702e91f0bbbdcf6 (commit)
via 58283fa48ac4c232e20edb144de0dad791f429cb (commit)
via 8a3564c8893c5b9df451ff29d58f65604740b310 (commit)
via 435436f1402c345fec712d923848f934999f3107 (commit)
via 98483c34555c48143e6bfcdab63a9ec68a6fb86a (commit)
via 99afc8ab28a7748f9dda301da98e64a2b0b5e04c (commit)
via 724baae9fa3e9cb0e6e0835ae3dafc83fd30b84f (commit)
via 93e12f75baf215fdc8434461a2d5428e73f06860 (commit)
via 9d84626f72a70b3b71330c6fa31d767109a67d9b (commit)
via da214e3e13d2ee898a409df35cff9f208654503b (commit)
via 418a6dc9ee0369d13d14bfa825229c0e35a694e4 (commit)
via 8ab7817d7c346d35a7be2a1d6bfe51aed4fef15c (commit)
via 9e5da4f42072e2bee454178eb1422103504a40dc (commit)
via 78bb8f4b9676d6345f3fdd1e5cc89039806a9aba (commit)
via a8486dda670bf22c27c7cb2ee95b84cc53c1fc08 (commit)
via 73a6349b16ce1c95ff6216fdbff9551d574988d9 (commit)
via 5f7f84779f66576555728e70ea75383945e0700a (commit)
via eb3cc6a30fc6ddf4124a0051d44bcfede286520e (commit)
via 59464e665cd90e476e6c45843151ab6209a8517d (commit)
via 29f876f89516fb0696d85e760bb9cb15e7e398b2 (commit)
via 17ab7fae73ebcf624a0c719b16f45b9f16db1419 (commit)
via 21b1adb387dab476f88c301ec1538fb6be87ea1e (commit)
via a2004cbca71aef99c8f640c4d7f06944c63b4c7a (commit)
via 49ba2cf8ac63246f389ab5e8ea3b3d081dba9adf (commit)
via 423640029ecf6f138eb8b160fc40ca5afef44e4a (commit)
via 52b36c921ee59ec69deefb6123cbdb1b91dc3bc7 (commit)
via b0a028bf381e5467acca8d66f37e778d569e331d (commit)
via 46cd22e159df5387c036e285e8398e9f1320e2b4 (commit)
via ad40537d9f91fcf489d3e9a313a73385ed5fb241 (commit)
via 6353f50a59f621f96a3ccf0003e5ecd527dcd07c (commit)
via d2661eddfcbca6532d6cbe18954d345ab8566345 (commit)
via a02f49d16d40c8b38514af00b62001225f9f7a6d (commit)
via 774aee5f844849e58608ca86bec27e731c3eac5b (commit)
via 194ef6477663b3c8671724bb6ab7b0bb13ae0b09 (commit)
via 631e9f192cf33fb58c0a945ef57592fe7813eaa8 (commit)
via c6fc5883c41ed20f77ebaea543057521f76fe4d9 (commit)
via b3388dfb13dffdd653139673eceef34824eab072 (commit)
via 774554f46b20ca5ec2ef6c6d5e608114f14e2102 (commit)
via 91abb7aed95fb3cf15d25a971af2fa0b952e9570 (commit)
via e3f4b290d17a68db728166cdffcbe93517966e8b (commit)
via 7d2867d02fc20295ae0622c987a25e3119d3ca7f (commit)
via f027496530a43f7b5eb5beb840266a9bc5aaffec (commit)
via 0e9213727bb177ede9253fa8d0c1e1316487e33b (commit)
via 5e5a33213b60d89e146cd5e47d65f3f9833a9297 (commit)
via 4e80da3959ae51851ce68294bd59fed429977f4d (commit)
via d01a04e0f57552e78fe19beb99789888fecab7f4 (commit)
via deafd46a146bf83ea3af9076c9ec808cfd1c168b (commit)
via a7254a232bedb445f4c7dd47b8f623a94f7056b1 (commit)
via 90df5f971e7988a7a024ce95fc83dd9dea3c9b6f (commit)
via 0161c02ad4d593dc6a7ad5b352cf7db4e70b3b48 (commit)
via d98c6ee0863c784e79204242a3d868d4aedc3d5a (commit)
via f20a049cb0e6b29464acec69d26a46f26b7e2170 (commit)
via 46f994f729d0f554ad4f58abc82acbc7b526ffc5 (commit)
via 22a8be53ada3f90b8aa226f08081d579b377784a (commit)
via 5e70d900e46fb148acfebf8c2aa068159e58d91b (commit)
via ab560987671d76c85ce36a09bfc66cf5eb9398dd (commit)
via b91bb9719ff759417ce8f6412ca5f8be57a2e19c (commit)
via 2c0a8fc3bfa0fb8f5f6cb2df504b326741996025 (commit)
via b204a39d5b9003f991104c2bd6896013f19a05d0 (commit)
via 0f487a998063a01ea16e8be048076f53b11afd02 (commit)
via dfd2aeefef39f064183c84c23451637247e32399 (commit)
via 1c57fb2350d9b8440ae4cf50a94be7c61cc462b0 (commit)
via 8c55200c178773691ace2785240bfc65f4e351a9 (commit)
via ae40f56b7e12776479161c7d7a2d6616fae09850 (commit)
via dd8d9d4ab35db98d5269dd9c0728a5af6e748f1a (commit)
via 04e0ef7fe56b4c9c65b71bba010147af4506675d (commit)
via 29e01b55a5acd72e651c2b2bd5cc63ffe8b21da8 (commit)
via 24b2851dc08da2e9d63a072c1c8b4550ef919996 (commit)
via e17fed2f2bf0061fe942cc8ae9c31f44f0d6e303 (commit)
via d85eda60ea4f2304859ff935f998585dae2f4854 (commit)
via 181d405a1e606d4581d84b7fb110875c6561387a (commit)
via c96ac865ac20c4e80b3206a00c15fa998cb85bfd (commit)
via 538350b7db14e063f716b040a5b0f0ca2aa35278 (commit)
via 7e7b44723b80637d14bbbb714b69fe4ef7ea1f9a (commit)
via 2e06048a76a016e90baef2d7a6b218457f97e3a1 (commit)
via 8e4c96040527d952da60338f7cf061f976780543 (commit)
via 0b1b0da6ffc60991c0ccad85695631dec02db4da (commit)
via b587cb1fb68c3392af63abfdb4c91055dab6060f (commit)
from 6600a2c2981b2c37ab42c325e905d4a70a415342 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email, so we list those
revisions in full below.
- Log -----------------------------------------------------------------
-----------------------------------------------------------------------
Summary of changes:
COPYING | 6 +
ChangeLog | 109 ++++-
Makefile.am | 2 +-
compatcheck/Makefile.am | 12 +-
compatcheck/sqlite3-difftbl-check.py.in | 60 --
configure.ac | 39 +-
doc/guide/bind10-guide.xml | 204 ++++++-
doc/guide/bind10-messages.xml | 297 +++++++++-
ext/LICENSE_1_0.txt | 23 +
src/bin/Makefile.am | 2 +-
src/bin/auth/auth.spec.pre.in | 4 +
src/bin/auth/auth_config.cc | 68 ++-
src/bin/auth/b10-auth.8 | 6 +-
src/bin/auth/b10-auth.xml | 4 +-
src/bin/auth/query.cc | 6 +-
src/bin/auth/tests/Makefile.am | 6 +
src/bin/auth/tests/config_syntax_unittest.cc | 71 +++
src/bin/auth/tests/config_unittest.cc | 74 +++-
src/bin/auth/tests/datasrc_util.cc | 77 +++
src/bin/auth/tests/datasrc_util.h | 61 ++
src/bin/auth/tests/query_unittest.cc | 22 +-
src/bin/auth/tests/testdata/example.sqlite3 | Bin 11264 -> 15360 bytes
src/bin/bind10/bind10.8 | 9 +-
src/bin/bind10/bind10.xml | 28 +-
src/bin/bind10/bind10_src.py.in | 64 ++-
src/bin/bind10/bob.spec | 4 -
src/bin/bind10/tests/bind10_test.py.in | 46 ++-
src/bin/bindctl/tests/bindctl_test.py | 10 +
src/bin/cfgmgr/b10-cfgmgr.py.in | 7 +-
src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in | 15 +-
src/bin/dbutil/.gitignore | 3 +
src/bin/dbutil/Makefile.am | 39 ++
src/bin/dbutil/b10-dbutil.8 | 92 +++
src/bin/dbutil/b10-dbutil.xml | 192 ++++++
src/bin/dbutil/dbutil.py.in | 608 ++++++++++++++++++++
src/bin/dbutil/dbutil_messages.mes | 114 ++++
src/bin/dbutil/run_dbutil.sh.in | 40 ++
src/bin/dbutil/tests/.gitignore | 2 +
src/bin/dbutil/tests/Makefile.am | 6 +
src/bin/dbutil/tests/dbutil_test.sh.in | 481 ++++++++++++++++
src/bin/dbutil/tests/testdata/Makefile.am | 12 +
src/bin/dbutil/tests/testdata/README | 41 ++
src/bin/dbutil/tests/testdata/corrupt.sqlite3 | Bin 0 -> 215040 bytes
src/bin/dbutil/tests/testdata/empty_schema.sqlite3 | Bin 0 -> 215040 bytes
src/bin/dbutil/tests/testdata/empty_v1.sqlite3 | Bin 0 -> 215040 bytes
.../dbutil/tests/testdata/empty_version.sqlite3 | Bin 0 -> 13312 bytes
src/bin/dbutil/tests/testdata/invalid_v1.sqlite3 | Bin 0 -> 215040 bytes
src/bin/dbutil/tests/testdata/new_v1.sqlite3 | Bin 0 -> 215040 bytes
src/bin/dbutil/tests/testdata/no_schema.sqlite3 | Bin 0 -> 2048 bytes
src/bin/dbutil/tests/testdata/old_v1.sqlite3 | Bin 0 -> 215040 bytes
.../dbutil/tests/testdata/too_many_version.sqlite3 | Bin 0 -> 13312 bytes
src/bin/dbutil/tests/testdata/v2_0.sqlite3 | Bin 0 -> 13312 bytes
src/bin/host/host.cc | 2 +-
src/bin/loadzone/b10-loadzone.8 | 6 +-
src/bin/loadzone/b10-loadzone.xml | 4 +-
src/bin/xfrin/tests/testdata/example.com.sqlite3 | Bin 12288 -> 15360 bytes
src/bin/xfrin/tests/xfrin_test.py | 131 +++++
src/bin/xfrin/xfrin.py.in | 106 +++-
src/bin/xfrin/xfrin_messages.mes | 5 +-
src/bin/xfrout/b10-xfrout.8 | 9 +
src/bin/xfrout/tests/testdata/test.sqlite3 | Bin 12288 -> 15360 bytes
src/bin/xfrout/xfrout.py.in | 7 +
src/bin/xfrout/xfrout_messages.mes | 4 +
src/bin/zonemgr/tests/Makefile.am | 1 +
src/bin/zonemgr/tests/zonemgr_test.py | 33 +-
src/bin/zonemgr/zonemgr.py.in | 6 +
src/bin/zonemgr/zonemgr_messages.mes | 4 +
src/cppcheck-suppress.lst | 25 +-
src/lib/asiodns/dns_service.cc | 160 +-----
src/lib/asiodns/dns_service.h | 34 +-
src/lib/asiodns/sync_udp_server.cc | 23 -
src/lib/asiodns/sync_udp_server.h | 13 -
src/lib/asiodns/tcp_server.cc | 22 -
src/lib/asiodns/tcp_server.h | 6 -
src/lib/asiodns/tests/dns_server_unittest.cc | 48 +--
src/lib/asiodns/tests/dns_service_unittest.cc | 91 ---
src/lib/asiodns/udp_server.cc | 6 -
src/lib/asiodns/udp_server.h | 13 -
src/lib/bench/benchmark.h | 52 +-
src/lib/bench/example/search_bench.cc | 4 +-
src/lib/bench/tests/benchmark_unittest.cc | 8 +-
src/lib/cc/cc_messages.mes | 2 +-
src/lib/cc/data.cc | 99 +++-
src/lib/cc/session.cc | 2 +-
src/lib/cc/tests/Makefile.am | 7 +-
src/lib/cc/tests/data_unittests.cc | 53 ++-
src/lib/config/config_data.h | 2 +-
src/lib/datasrc/Makefile.am | 1 +
src/lib/datasrc/database.cc | 511 ++++++++++++-----
src/lib/datasrc/database.h | 157 +++++-
src/lib/datasrc/datasrc_messages.mes | 58 ++-
src/lib/datasrc/factory.h | 2 +-
src/lib/datasrc/memory_datasrc.cc | 127 ++++-
src/lib/datasrc/memory_datasrc.h | 30 +-
src/lib/datasrc/sqlite3_accessor.cc | 210 +++++---
src/lib/datasrc/sqlite3_accessor.h | 16 +-
src/lib/datasrc/sqlite3_accessor_link.cc | 6 +-
src/lib/datasrc/sqlite3_datasrc.cc | 194 +++++--
src/lib/datasrc/sqlite3_datasrc.h | 6 +
src/lib/datasrc/static_datasrc.cc | 1 +
src/lib/datasrc/tests/Makefile.am | 7 +-
src/lib/datasrc/tests/database_unittest.cc | 562 +++++++++++++-----
src/lib/datasrc/tests/faked_nsec3.cc | 196 +++++++
src/lib/datasrc/tests/faked_nsec3.h | 86 +++
src/lib/datasrc/tests/memory_datasrc_unittest.cc | 296 +++-------
src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 103 +++--
src/lib/datasrc/tests/sqlite3_unittest.cc | 15 +
src/lib/datasrc/tests/static_unittest.cc | 1 +
src/lib/datasrc/tests/test_client.cc | 92 +++
src/lib/datasrc/tests/test_client.h | 71 +++
src/lib/datasrc/tests/testdata/diffs.sqlite3 | Bin 16384 -> 20480 bytes
src/lib/datasrc/tests/testdata/example.org.sqlite3 | Bin 14336 -> 16384 bytes
.../datasrc/tests/testdata/example2.com.sqlite3 | Bin 14336 -> 16384 bytes
.../tests/testdata/new_minor_schema.sqlite3 | Bin 0 -> 2048 bytes
src/lib/datasrc/tests/testdata/newschema.sqlite3 | Bin 0 -> 2048 bytes
src/lib/datasrc/tests/testdata/oldschema.sqlite3 | Bin 0 -> 2048 bytes
src/lib/datasrc/tests/testdata/rwtest.sqlite3 | Bin 13312 -> 0 bytes
src/lib/datasrc/tests/testdata/test-root.sqlite3 | Bin 17408 -> 22528 bytes
src/lib/datasrc/tests/testdata/test.sqlite3 | Bin 44032 -> 70656 bytes
.../datasrc/tests/testdata/test.sqlite3.nodiffs | Bin 43008 -> 0 bytes
.../datasrc/tests/zone_finder_context_unittest.cc | 27 +-
src/lib/datasrc/zone.h | 14 +-
src/lib/dns/Makefile.am | 2 +-
src/lib/dns/name.cc | 33 +-
src/lib/dns/name.h | 33 +-
src/lib/dns/rdata/template.cc | 1 +
src/lib/dns/tests/name_unittest.cc | 56 ++-
src/lib/dns/tests/rdata_dhcid_unittest.cc | 1 +
src/lib/dns/tests/rdata_in_a_unittest.cc | 1 +
src/lib/dns/tests/rdata_in_aaaa_unittest.cc | 1 +
src/lib/dns/tests/rdata_mx_unittest.cc | 1 +
src/lib/dns/tests/rdata_unittest.cc | 1 +
src/lib/dns/tests/rrttl_unittest.cc | 2 +
src/lib/python/Makefile.am | 2 +-
src/lib/python/isc/acl/tests/dns_test.py | 2 +-
src/lib/python/isc/cc/session.py | 25 +-
src/lib/python/isc/cc/tests/session_test.py | 31 +-
src/lib/python/isc/config/Makefile.am | 1 +
src/lib/python/isc/config/cfgmgr.py | 31 +-
src/lib/python/isc/config/cfgmgr_messages.mes | 6 +-
src/lib/python/isc/config/config_data.py | 21 +-
src/lib/python/isc/config/tests/cfgmgr_test.py | 57 ++-
.../python/isc/config/tests/config_data_test.py | 63 ++-
src/lib/python/isc/datasrc/datasrc.cc | 3 +
src/lib/python/isc/datasrc/finder_inc.cc | 7 +-
src/lib/python/isc/datasrc/finder_python.cc | 3 +
src/lib/python/isc/datasrc/sqlite3_ds.py | 49 +-
src/lib/python/isc/datasrc/tests/Makefile.am | 8 +-
src/lib/python/isc/datasrc/tests/datasrc_test.py | 24 +-
.../python/isc/datasrc/tests/sqlite3_ds_test.py | 124 +----
.../isc/datasrc/tests/testdata/example.com.sqlite3 | Bin 44032 -> 70656 bytes
.../tests/testdata/new_minor_schema.sqlite3 | Bin 0 -> 2048 bytes
.../isc/datasrc/tests/testdata/newschema.sqlite3 | Bin 0 -> 2048 bytes
.../isc/datasrc/tests/testdata/oldschema.sqlite3 | Bin 0 -> 2048 bytes
.../datasrc/tests/testdata/test.sqlite3.nodiffs | Bin 43008 -> 0 bytes
src/lib/python/isc/log_messages/Makefile.am | 2 +
src/lib/python/isc/log_messages/dbutil_messages.py | 1 +
src/lib/python/isc/log_messages/work/Makefile.am | 2 +-
src/lib/python/isc/notify/notify_out.py | 24 +-
.../isc/notify/tests/testdata/brokentest.sqlite3 | Bin 11264 -> 15360 bytes
.../python/isc/notify/tests/testdata/test.sqlite3 | Bin 13312 -> 19456 bytes
src/lib/resolve/tests/recursive_query_unittest.cc | 247 +++++---
src/lib/testutils/testdata/Makefile.am | 1 +
src/lib/testutils/testdata/auth_test.sqlite3 | Bin 0 -> 16384 bytes
.../testutils/testdata/auth_test.sqlite3.copied | Bin 0 -> 16384 bytes
src/lib/testutils/testdata/example.sqlite3 | Bin 11264 -> 15360 bytes
src/lib/testutils/testdata/rwtest.sqlite3 | Bin 0 -> 16384 bytes
tests/lettuce/.gitignore | 1 +
tests/lettuce/configurations/.gitignore | 2 +
.../configurations/bindctl_commands.config.orig | 34 ++
tests/lettuce/configurations/default.config | 16 +
.../lettuce/configurations/example.org.config.orig | 8 +-
.../configurations/example.org.inmem.config | 9 +-
tests/lettuce/configurations/example2.org.config | 10 +-
.../inmemory_over_sqlite3/secondary.conf | 32 +
.../configurations/ixfr-out/testset1-config.db | 12 +-
.../configurations/multi_instance/.gitignore | 1 +
.../multi_instance/multi_auth.config.orig | 2 +-
tests/lettuce/configurations/no_db_file.config | 14 +
.../lettuce/configurations/nsec3/nsec3_auth.config | 2 +-
tests/lettuce/configurations/resolver/.gitignore | 1 +
.../resolver/resolver_basic.config.orig | 2 +-
.../configurations/xfrin/retransfer_master.conf | 12 +-
.../configurations/xfrin/retransfer_slave.conf | 10 +-
tests/lettuce/data/.gitignore | 1 +
tests/lettuce/data/empty_db.sqlite3 | Bin 11264 -> 14336 bytes
tests/lettuce/data/example.org.sqlite3 | Bin 14336 -> 15360 bytes
tests/lettuce/data/ixfr-out/.gitignore | 1 +
tests/lettuce/data/ixfr-out/zones.slite3 | Bin 246784 -> 0 bytes
tests/lettuce/data/ixfr-out/zones.sqlite3 | Bin 0 -> 468992 bytes
tests/lettuce/features/bindctl_commands.feature | 33 +-
tests/lettuce/features/default.feature | 22 +
tests/lettuce/features/example.feature | 64 ++-
.../lettuce/features/inmemory_over_sqlite3.feature | 9 +
tests/lettuce/features/ixfr_out_bind10.feature | 18 +-
tests/lettuce/features/multi_instance.feature | 19 +-
tests/lettuce/features/nsec3_auth.feature | 360 +++++++++---
tests/lettuce/features/queries.feature | 76 ++-
tests/lettuce/features/resolver_basic.feature | 12 +-
tests/lettuce/features/terrain/.gitignore | 1 +
tests/lettuce/features/terrain/bind10_control.py | 29 +-
tests/lettuce/features/terrain/querying.py | 8 +-
tests/lettuce/features/terrain/steps.py | 18 +-
tests/lettuce/features/terrain/terrain.py | 38 +-
tests/lettuce/features/xfrin_bind10.feature | 14 +-
tests/lettuce/setup_intree_bind10.sh.in | 2 +-
tests/system/bindctl/nsx1/.gitignore | 2 +
tests/system/bindctl/tests.sh | 22 +-
tests/system/glue/nsx1/.gitignore | 2 +
tests/system/glue/nsx1/b10-config.db.in | 10 +
tests/system/ixfr/b10-config.db.in | 10 +
tests/system/ixfr/in-2/ns1/.gitignore | 1 +
tests/system/ixfr/in-2/nsx2/.gitignore | 1 +
tests/system/start.pl | 7 +-
214 files changed, 6621 insertions(+), 1936 deletions(-)
delete mode 100755 compatcheck/sqlite3-difftbl-check.py.in
create mode 100644 ext/LICENSE_1_0.txt
create mode 100644 src/bin/auth/tests/config_syntax_unittest.cc
create mode 100644 src/bin/auth/tests/datasrc_util.cc
create mode 100644 src/bin/auth/tests/datasrc_util.h
create mode 100644 src/bin/dbutil/.gitignore
create mode 100644 src/bin/dbutil/Makefile.am
create mode 100644 src/bin/dbutil/b10-dbutil.8
create mode 100644 src/bin/dbutil/b10-dbutil.xml
create mode 100755 src/bin/dbutil/dbutil.py.in
create mode 100644 src/bin/dbutil/dbutil_messages.mes
create mode 100755 src/bin/dbutil/run_dbutil.sh.in
create mode 100644 src/bin/dbutil/tests/.gitignore
create mode 100644 src/bin/dbutil/tests/Makefile.am
create mode 100755 src/bin/dbutil/tests/dbutil_test.sh.in
create mode 100644 src/bin/dbutil/tests/testdata/Makefile.am
create mode 100644 src/bin/dbutil/tests/testdata/README
create mode 100644 src/bin/dbutil/tests/testdata/corrupt.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/empty_schema.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/empty_v1.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/empty_version.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/invalid_v1.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/new_v1.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/no_schema.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/old_v1.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/too_many_version.sqlite3
create mode 100644 src/bin/dbutil/tests/testdata/v2_0.sqlite3
create mode 100644 src/lib/datasrc/tests/faked_nsec3.cc
create mode 100644 src/lib/datasrc/tests/faked_nsec3.h
create mode 100644 src/lib/datasrc/tests/test_client.cc
create mode 100644 src/lib/datasrc/tests/test_client.h
create mode 100644 src/lib/datasrc/tests/testdata/new_minor_schema.sqlite3
create mode 100644 src/lib/datasrc/tests/testdata/newschema.sqlite3
create mode 100644 src/lib/datasrc/tests/testdata/oldschema.sqlite3
delete mode 100644 src/lib/datasrc/tests/testdata/rwtest.sqlite3
delete mode 100644 src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
create mode 100644 src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3
create mode 100644 src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3
create mode 100644 src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3
delete mode 100644 src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
create mode 100644 src/lib/python/isc/log_messages/dbutil_messages.py
create mode 100755 src/lib/testutils/testdata/auth_test.sqlite3
create mode 100755 src/lib/testutils/testdata/auth_test.sqlite3.copied
create mode 100644 src/lib/testutils/testdata/rwtest.sqlite3
create mode 100644 tests/lettuce/configurations/.gitignore
create mode 100644 tests/lettuce/configurations/bindctl_commands.config.orig
create mode 100644 tests/lettuce/configurations/default.config
create mode 100644 tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
create mode 100644 tests/lettuce/configurations/multi_instance/.gitignore
create mode 100644 tests/lettuce/configurations/resolver/.gitignore
create mode 100644 tests/lettuce/data/.gitignore
create mode 100644 tests/lettuce/data/ixfr-out/.gitignore
delete mode 100644 tests/lettuce/data/ixfr-out/zones.slite3
create mode 100644 tests/lettuce/data/ixfr-out/zones.sqlite3
create mode 100644 tests/lettuce/features/default.feature
create mode 100644 tests/lettuce/features/inmemory_over_sqlite3.feature
create mode 100644 tests/lettuce/features/terrain/.gitignore
create mode 100644 tests/system/ixfr/in-2/ns1/.gitignore
create mode 100644 tests/system/ixfr/in-2/nsx2/.gitignore
-----------------------------------------------------------------------
diff --git a/COPYING b/COPYING
index 557bdfb..63717af 100644
--- a/COPYING
+++ b/COPYING
@@ -11,3 +11,9 @@ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
+
+-----------------------------------------------------------------------------
+
+The ext/asio and ext/coroutine code is externally maintained and
+distributed under the Boost Software License, Version 1.0.
+(See accompanying file ext/LICENSE_1_0.txt.)
diff --git a/ChangeLog b/ChangeLog
index 3097b64..d1a0ad2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,110 @@
+425. [func]* muks
+ Don't autostart b10-auth, b10-xfrin, b10-xfrout and b10-zonemgr in
+ the default configuration.
+ (Trac #1818, git 31de885ba0409f54d9a1615eff5a4b03ed420393)
+
+424. [bug] jelte
+ Fixed a bug in bindctl where in some cases, configuration settings
+ in a named set could disappear, if a child element is modified.
+ (Trac #1491, git 00a36e752802df3cc683023d256687bf222e256a)
+
+423. [bug] jinmei
+ The database based zone iterator now correctly resets mixed TTLs
+ of the same RRset (when that happens) to the lowest one. The
+ previous implementation could miss lower ones if they appear in a
+ later part of the RRset.
+ (part of Trac #1791, git f1f0bc00441057e7050241415ee0367a09c35032)
+
+422. [bug] jinmei
+ The database based zone iterator now separates RRSIGs of the same
+ name and type but for different covered types.
+ (part of Trac #1791, git b4466188150a50872bc3c426242bc7bba4c5f38d)
+
+421. [build] jinmei
+ Made sure BIND 10 can be built with clang++ 3.1. (It failed on
+ MacOS 10.7 using Xcode 4.3, but it's more likely to be a matter of
+ clang version.)
+ (Trac #1773, git ceaa247d89ac7d97594572bc17f005144c5efb8d)
+
+420. [bug]* jinmei, stephen
+ Updated the DB schema used in the SQLite3 data source so it can
+ use SQL indices more effectively. The previous schema had several
+ issues in this sense and could be very slow for some queries on a
+ very large zone (especially for negative answers). This change
+ requires a major version up of the schema; use b10-dbutil to
+ upgrade existing database files. Note: 'make install' will fail
+ unless old DB files installed in the standard location have been
+ upgraded.
+ (Trac #324, git 8644866497053f91ada4e99abe444d7876ed00ff)
+
+419. [bug] jelte
+ JSON handler has been improved; escaping now works correctly
+ (including quotes in strings), and it now rejects more types of
+ malformed input.
+ (Trac #1626, git 3b09268518e4e90032218083bcfebf7821be7bd5)
+
+418. [bug] vorner
+ Fixed crash in bindctl when config unset was called.
+ (Trac #1715, git 098da24dddad497810aa2787f54126488bb1095c)
+
+417. [bug] jelte
+ The notify-out code now looks up notify targets in their correct
+ zones (and no longer just in the zone that the notify is about).
+ (Trac #1535, git 66300a3c4769a48b765f70e2d0dbf8bbb714435b)
+
+416. [func]* jelte
+ The implementations of ZoneFinder::find() now throw an OutOfZone
+ exception when the name argument is not in or below the zone this
+ zonefinder contains.
+ (Trac #1535, git 66300a3c4769a48b765f70e2d0dbf8bbb714435b)
+
+bind10-devel-20120329 released on March 29, 2012
+
+415. [doc] jinmei, jreed
+ BIND 10 Guide updated to now describe the in-memory data source
+ configurations for b10-auth.
+ (Trac #1732, git 434d8db8dfcd23a87b8e798e5702e91f0bbbdcf6)
+
+414. [bug] jinmei
+ b10-auth now correctly handles delegation from an unsigned zone
+ (defined in the in-memory data source) when the query has DNSSEC
+ DO bit on. It previously returned SERVFAIL.
+ (Trac #1836, git 78bb8f4b9676d6345f3fdd1e5cc89039806a9aba)
+
+413. [func] stephen, jelte
+ Created a new tool b10-dbutil, that can check and upgrade database
+ schemas, to be used when incompatible changes are introduced in the
+ backend database schema. Currently it only supports sqlite3 databases.
+ Note: there's no schema change that requires this utility as of
+ the March 29th release. While running it shouldn't break
+ an existing database file, it should be even more advisable not to
+ an existing database file, it is all the more advisable not to
+ (Trac #963, git 49ba2cf8ac63246f389ab5e8ea3b3d081dba9adf)
+
+412. [func] jelte
+ Added a command-line option '--clear-config' to bind10, which causes
+ the system to create a backup of the existing configuration database
+ file, and start out with a clean default configuration. This can be
+ used if the configuration file is corrupted to the point where it
+ cannot be read anymore, and BIND 10 refuses to start. The name of
+ the backup file can be found in the logs (CFGMGR_RENAMED_CONFIG_FILE).
+ (Trac #1443, git 52b36c921ee59ec69deefb6123cbdb1b91dc3bc7)
+
+411. [func] muks
+ Add a -i/--no-kill command-line argument to bind10, which stops
+ it from sending SIGTERM and SIGKILL to other b10 processes when
+ they're shutting down.
+ (Trac #1819, git 774554f46b20ca5ec2ef6c6d5e608114f14e2102)
+
+410. [bug] jinmei
+ Python CC library now ensures write operations transmit all given
+ data (unless an error happens). Previously it didn't check the
+ size of transmitted data, which could result in partial write on
+ some systems (notably on OpenBSD) and subsequently cause system
+ hang up or other broken state. This fix specifically solves start
+ up failure on OpenBSD.
+ (Trac #1829, git 5e5a33213b60d89e146cd5e47d65f3f9833a9297)
+
409. [bug] jelte
Fixed a parser bug in bindctl that could make bindctl crash. Also
improved 'command help' output; argument order is now shown
@@ -11,7 +118,7 @@
also be used for others soon.
(Trac #1688, git b77baca56ffb1b9016698c00ae0a1496d603d197)
-407. [build] haikuo
+407. [build] haikuo
Remove "--enable-boost-threads" switch in configure command. This
thread lock mechanism is useless for bind10 and causes performance
hits.
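
For reference, the new bind10 command-line options described in ChangeLog
entries 411 and 412 above are invoked roughly as follows (an illustrative
sketch only; see the bind10 man page for the authoritative syntax):

    $ bind10 --clear-config   # back up the existing configuration database and start from a clean default configuration
    $ bind10 -i               # same as --no-kill: do not send SIGTERM/SIGKILL to other b10 processes when they shut down
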
diff --git a/Makefile.am b/Makefile.am
index cc91a56..55a28aa 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -79,7 +79,7 @@ report-coverage: report-cpp-coverage report-python-coverage
# for static C++ check using cppcheck (when available)
cppcheck:
- cppcheck --enable=all --suppressions src/cppcheck-suppress.lst \
+ cppcheck --enable=all --suppressions src/cppcheck-suppress.lst --inline-suppr \
--quiet --error-exitcode=1 \
--template '{file}:{line}: check_fail: {message} ({severity},{id})' \
src
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
index 029578d..15ef017 100644
--- a/compatcheck/Makefile.am
+++ b/compatcheck/Makefile.am
@@ -1,8 +1,12 @@
-noinst_SCRIPTS = sqlite3-difftbl-check.py
-
# We're going to abuse install-data-local for a pre-install check.
# This is to be considered a short term hack and is expected to be removed
# in a near future version.
install-data-local:
- $(PYTHON) sqlite3-difftbl-check.py \
- $(localstatedir)/$(PACKAGE)/zone.sqlite3
+ if test -e $(localstatedir)/$(PACKAGE)/zone.sqlite3; then \
+ $(SHELL) $(top_builddir)/src/bin/dbutil/run_dbutil.sh --check \
+ $(localstatedir)/$(PACKAGE)/zone.sqlite3 || \
+ (echo "\nSQLite3 DB file schema version is old. " \
+ "Please run: " \
+ "$(abs_top_builddir)/src/bin/dbutil/run_dbutil.sh --upgrade " \
+ "$(localstatedir)/$(PACKAGE)/zone.sqlite3"; exit 1) \
+ fi
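
For reference, the pre-install check added above amounts to the following
manual sequence against an existing SQLite3 zone database (the path is
illustrative; the actual location depends on the --localstatedir given at
configure time):

    $ src/bin/dbutil/run_dbutil.sh --check /usr/local/var/bind10-devel/zone.sqlite3
    $ src/bin/dbutil/run_dbutil.sh --upgrade /usr/local/var/bind10-devel/zone.sqlite3

run_dbutil.sh is the in-tree wrapper for the new b10-dbutil tool; --check
reports whether the schema version is current, and --upgrade makes a backup
of the file before converting it to the new schema (see the DBUTIL_*
messages further below).
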
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
deleted file mode 100755
index e3b7b91..0000000
--- a/compatcheck/sqlite3-difftbl-check.py.in
+++ /dev/null
@@ -1,60 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2011 Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import os, sqlite3, sys
-from optparse import OptionParser
-
-usage = 'usage: %prog [options] db_file'
-parser = OptionParser(usage=usage)
-parser.add_option("-u", "--upgrade", action="store_true",
- dest="upgrade", default=False,
- help="Upgrade the database file [default: %default]")
-(options, args) = parser.parse_args()
-if len(args) == 0:
- parser.error('missing argument')
-
-db_file = args[0]
-
-# If the file doesn't exist, there's nothing to do
-if not os.path.exists(db_file):
- sys.exit(0)
-
-conn = sqlite3.connect(db_file)
-cur = conn.cursor()
-try:
- # This can be anything that works iff the "diffs" table exists
- cur.execute('SELECT name FROM diffs DESC LIMIT 1')
-except sqlite3.OperationalError as ex:
- # If it fails with 'no such table', create a new one or fail with
- # warning depending on the --upgrade command line option.
- if str(ex) == 'no such table: diffs':
- if options.upgrade:
- cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
- 'zone_id INTEGER NOT NULL, ' +
- 'version INTEGER NOT NULL, ' +
- 'operation INTEGER NOT NULL, ' +
- 'name STRING NOT NULL COLLATE NOCASE, ' +
- 'rrtype STRING NOT NULL COLLATE NOCASE, ' +
- 'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
- else:
- sys.stdout.write('Found an older version of SQLite3 DB file: ' +
- db_file + '\n' + "Perform '" + os.getcwd() +
- "/sqlite3-difftbl-check.py --upgrade " +
- db_file + "'\n" +
- 'before continuing install.\n')
- sys.exit(1)
-conn.close()
diff --git a/configure.ac b/configure.ac
index 6e3b9d8..a35da4c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20120316, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20120405, bind10-dev at isc.org)
AC_CONFIG_SRCDIR(README)
AM_INIT_AUTOMAKE
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])dnl be backward compatible
@@ -924,29 +924,6 @@ CPPFLAGS="$CPPFLAGS -I\$(top_srcdir)/ext/coroutine"
#
# Disable threads: Currently we don't use them.
CPPFLAGS="$CPPFLAGS -DASIO_DISABLE_THREADS=1"
-#
-# kqueue portability: ASIO uses kqueue by default if it's available (it's
-# generally available in BSD variants). Unfortunately, some public
-# implementation of kqueue forces a conversion from a pointer to an integer,
-# which is prohibited in C++ unless reinterpret_cast, C++'s most evil beast
-# (and ASIO doesn't use it anyway) is used. This will cause build error for
-# some of our C++ files including ASIO header files. The following check
-# detects such cases and tells ASIO not to use kqueue if so.
-AC_CHECK_FUNC(kqueue, ac_cv_have_kqueue=yes, ac_cv_have_kqueue=no)
-if test "X$ac_cv_have_kqueue" = "Xyes"; then
- AC_MSG_CHECKING([whether kqueue EV_SET compiles in C++])
- AC_TRY_COMPILE([
-#include <sys/types.h>
-#include <sys/param.h>
-#include <sys/event.h>],
-[char* udata;
-struct kevent kevent;
-EV_SET(&kevent, 0, 0, 0, 0, 0, udata);],
- [AC_MSG_RESULT(yes)],
- [AC_MSG_RESULT([no, disable kqueue for ASIO])
- CPPFLAGS="$CPPFLAGS -DASIO_DISABLE_KQUEUE=1"
- ])
-fi
# Check for functions that are not available on all platforms
AC_CHECK_FUNCS([pselect])
@@ -1015,6 +992,9 @@ AC_CONFIG_FILES([Makefile
src/bin/cfgmgr/plugins/Makefile
src/bin/cfgmgr/plugins/tests/Makefile
src/bin/cfgmgr/tests/Makefile
+ src/bin/dbutil/Makefile
+ src/bin/dbutil/tests/Makefile
+ src/bin/dbutil/tests/testdata/Makefile
src/bin/host/Makefile
src/bin/loadzone/Makefile
src/bin/loadzone/tests/correct/Makefile
@@ -1028,8 +1008,8 @@ AC_CONFIG_FILES([Makefile
src/bin/ddns/tests/Makefile
src/bin/dhcp6/Makefile
src/bin/dhcp6/tests/Makefile
- src/bin/dhcp4/Makefile
- src/bin/dhcp4/tests/Makefile
+ src/bin/dhcp4/Makefile
+ src/bin/dhcp4/tests/Makefile
src/bin/resolver/Makefile
src/bin/resolver/tests/Makefile
src/bin/sockcreator/Makefile
@@ -1136,13 +1116,15 @@ AC_CONFIG_FILES([Makefile
tests/tools/perfdhcp/Makefile
])
AC_OUTPUT([doc/version.ent
- compatcheck/sqlite3-difftbl-check.py
src/bin/cfgmgr/b10-cfgmgr.py
src/bin/cfgmgr/tests/b10-cfgmgr_test.py
src/bin/cmdctl/cmdctl.py
src/bin/cmdctl/run_b10-cmdctl.sh
src/bin/cmdctl/tests/cmdctl_test
src/bin/cmdctl/cmdctl.spec.pre
+ src/bin/dbutil/dbutil.py
+ src/bin/dbutil/run_dbutil.sh
+ src/bin/dbutil/tests/dbutil_test.sh
src/bin/ddns/ddns.py
src/bin/xfrin/tests/xfrin_test
src/bin/xfrin/xfrin.py
@@ -1219,13 +1201,14 @@ AC_OUTPUT([doc/version.ent
tests/system/ixfr/in-3/setup.sh
tests/system/ixfr/in-4/setup.sh
], [
- chmod +x compatcheck/sqlite3-difftbl-check.py
chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
chmod +x src/bin/xfrin/run_b10-xfrin.sh
chmod +x src/bin/xfrout/run_b10-xfrout.sh
chmod +x src/bin/zonemgr/run_b10-zonemgr.sh
chmod +x src/bin/bind10/run_bind10.sh
chmod +x src/bin/cmdctl/tests/cmdctl_test
+ chmod +x src/bin/dbutil/run_dbutil.sh
+ chmod +x src/bin/dbutil/tests/dbutil_test.sh
chmod +x src/bin/xfrin/tests/xfrin_test
chmod +x src/bin/xfrout/tests/xfrout_test
chmod +x src/bin/zonemgr/tests/zonemgr_test
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 3599e25..1eaad90 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -771,12 +771,8 @@ as a dependency earlier -->
master process will also start up
<command>b10-cmdctl</command> for administration tools to
communicate with the system,
- <command>b10-auth</command> for authoritative DNS service,
- <command>b10-stats</command> for statistics collection,
- <command>b10-stats-httpd</command> for statistics reporting,
- <command>b10-xfrin</command> for inbound DNS zone transfers,
- <command>b10-xfrout</command> for outbound DNS zone transfers,
- and <command>b10-zonemgr</command> for secondary service.
+ <command>b10-stats</command> for statistics collection, and
+ <command>b10-stats-httpd</command> for statistics reporting.
</para>
<section id="start">
@@ -810,12 +806,7 @@ as a dependency earlier -->
The configuration is in the Boss/components section. Each element
represents one component, which is an abstraction of a process
(currently there's also one component which doesn't represent
- a process). If you didn't want to transfer out at all (your server
- is a slave only), you would just remove the corresponding component
- from the set, like this and the process would be stopped immediately
- (and not started on the next startup):
- <screen>> <userinput>config remove Boss/components b10-xfrout</userinput>
-> <userinput>config commit</userinput></screen>
+ a process).
</para>
<para>
@@ -1301,7 +1292,7 @@ since we used bind10 -->
<command>b10-auth</command> is configured via the
<command>b10-cfgmgr</command> configuration manager.
The module name is <quote>Auth</quote>.
- The configuration data item is:
+ The configuration data items are:
<variablelist>
@@ -1317,22 +1308,119 @@ This may be a temporary setting until then.
</listitem>
</varlistentry>
+<!-- NOTE: docs pulled in verbatim from the b10-auth.xml manual page.
+ TODO: automate this if want this or rewrite
+-->
+ <varlistentry>
+ <term>datasources</term>
+ <listitem>
+ <simpara>
+ <varname>datasources</varname> configures data sources.
+ The list items include:
+ <varname>type</varname> to define the required data source type
+ (such as <quote>memory</quote>);
+ <varname>class</varname> to optionally select the class
+ (it defaults to <quote>IN</quote>);
+ and
+ <varname>zones</varname> to define the
+ <varname>file</varname> path name and the
+ <varname>origin</varname> (default domain).
+
+ By default, this is empty.
+
+ <note><simpara>
+ In this development version, currently this is only used for the
+ memory data source.
+ Only the IN class is supported at this time.
+ By default, the memory data source is disabled.
+ Also, currently the zone file must be canonical such as
+ generated by <command>named-compilezone -D</command>.
+ </simpara></note>
+
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>listen_on</term>
+ <listitem>
+ <simpara>
+ <varname>listen_on</varname> is a list of addresses and ports for
+ <command>b10-auth</command> to listen on.
+ The list items are the <varname>address</varname> string
+ and <varname>port</varname> number.
+ By default, <command>b10-auth</command> listens on port 53
+ on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>statistics-interval</term>
+ <listitem>
+ <simpara>
+ <varname>statistics-interval</varname> is the timer interval
+ in seconds for <command>b10-auth</command> to share its
+ statistics information to
+ <citerefentry><refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum></citerefentry>.
+ Statistics updates can be disabled by setting this to 0.
+ The default is 60.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
</variablelist>
</para>
<para>
- The configuration command is:
+ The configuration commands are:
<variablelist>
<varlistentry>
+ <term>loadzone</term>
+ <listitem>
+ <simpara>
+ <command>loadzone</command> tells <command>b10-auth</command>
+ to load or reload a zone file. The arguments include:
+ <varname>class</varname> which optionally defines the class
+ (it defaults to <quote>IN</quote>);
+ <varname>origin</varname> is the domain name of the zone;
+ and
+ <varname>datasrc</varname> optionally defines the type of datasource
+ (it defaults to <quote>memory</quote>).
+
+ <note><simpara>
+ In this development version, currently this only supports the
+ IN class and the memory data source.
+ </simpara></note>
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>sendstats</term>
+ <listitem>
+ <simpara>
+ <command>sendstats</command> tells <command>b10-auth</command>
+ to send its statistics data to
+ <citerefentry><refentrytitle>b10-stats</refentrytitle><manvolnum>8</manvolnum></citerefentry>
+ immediately.
+ </simpara>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term>shutdown</term>
<listitem>
<simpara>Stop the authoritative DNS server.
+ This has an optional <varname>pid</varname> argument to
+ select the process ID to stop.
+ (Note that the BIND 10 boss process may restart this service
+ if configured.)
</simpara>
-<!-- TODO: what happens when this is sent, will bind10 restart? -->
</listitem>
</varlistentry>
@@ -1362,10 +1450,79 @@ This may be a temporary setting until then.
(The full path is what was defined at build configure time for
<option>--localstatedir</option>.
The default is <filename>/usr/local/var/</filename>.)
- This data file location may be changed by defining the
- <quote>database_file</quote> configuration.
+ This data file location may be changed by defining the
+ <quote>database_file</quote> configuration.
</para>
+ <section id="in-memory-datasource">
+ <title>In-memory Data Source</title>
+
+ <para>
+<!-- How to configure it. -->
+ The following commands to <command>bindctl</command>
+ provide an example of configuring an in-memory data
+ source containing the <quote>example.com</quote> zone
+ with the zone file named <quote>example.com.zone</quote>:
+
+<!--
+ <screen>> <userinput> config set Auth/datasources/ [{"type": "memory", "zones": [{"origin": "example.com", "file": "example.com.zone"}]}]</userinput></screen>
+-->
+
+ <screen>> <userinput>config add Auth/datasources</userinput>
+> <userinput>config set Auth/datasources[0]/type "<option>memory</option>"</userinput>
+> <userinput>config add Auth/datasources[0]/zones</userinput>
+> <userinput>config set Auth/datasources[0]/zones[0]/origin "<option>example.com</option>"</userinput>
+> <userinput>config set Auth/datasources[0]/zones[0]/file "<option>example.com.zone</option>"</userinput>
+> <userinput>config commit</userinput></screen>
+
+ The authoritative server will begin serving it immediately
+ after it is loaded.
+ </para>
+
+ <para>
+ Use the <command>Auth loadzone</command> command in
+ <command>bindctl</command> to reload a changed master
+ file into memory; for example:
+
+ <screen>> <userinput>Auth loadzone origin="example.com"</userinput>
+</screen>
+
+ </para>
+
+<!--
+ <para>
+ The <varname>file</varname> may be an absolute path to the
+ master zone file or it is relative to the directory BIND 10 is
+ started from.
+ </para>
+-->
+
+ <para>
+ By default, the memory data source is disabled; it must be
+ configured explicitly. To disable all the in-memory zones,
+ specify a null list for <varname>Auth/datasources</varname>:
+
+<!-- TODO: this assumes that Auth/datasources is for memory only -->
+
+ <screen>> <userinput>config set Auth/datasources/ []</userinput>
+> <userinput>config commit</userinput></screen>
+ </para>
+
+ <para>
+ The following example stops serving a specific zone:
+
+ <screen>> <userinput>config remove Auth/datasources[<option>0</option>]/zones[<option>0</option>]</userinput>
+> <userinput>config commit</userinput></screen>
+
+ (Replace the list number(s) in
+ <varname>datasources[<replaceable>0</replaceable>]</varname>
+ and/or <varname>zones[<replaceable>0</replaceable>]</varname>
+ for the relevant zone as needed.)
+
+ </para>
+
+ </section>
+
</section>
<section>
@@ -1373,7 +1530,7 @@ This may be a temporary setting until then.
<para>
RFC 1035 style DNS master zone files may imported
- into a BIND 10 data source by using the
+ into a BIND 10 SQLite3 data source by using the
<command>b10-loadzone</command> utility.
</para>
@@ -1420,7 +1577,7 @@ This may be a temporary setting until then.
<note>
<para>
In the development prototype release, only the SQLite3 back
- end is used.
+ end is used by <command>b10-loadzone</command>.
By default, it stores the zone data in
<filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
unless the <option>-d</option> switch is used to set the
@@ -1695,15 +1852,10 @@ what is XfroutClient xfr_client??
<para>
The main <command>bind10</command> process can be configured
to select to run either the authoritative or resolver or both.
- By default, it starts the authoritative service.
-<!-- TODO: later both -->
-
- You may change this using <command>bindctl</command>, for example:
+ By default, it doesn't start either one. You may change this using
+ <command>bindctl</command>, for example:
<screen>
-> <userinput>config remove Boss/components b10-xfrout</userinput>
-> <userinput>config remove Boss/components b10-xfrin</userinput>
-> <userinput>config remove Boss/components b10-auth</userinput>
> <userinput>config add Boss/components b10-resolver</userinput>
> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index b0cbb26..60f9665 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -405,6 +405,27 @@ message associated with it has its own code.
</para></listitem>
</varlistentry>
+<varlistentry id="AUTH_RESPONSE_FAILURE">
+<term>AUTH_RESPONSE_FAILURE exception while building response to query: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to create a response to a received DNS packet has failed. The
+reason for the failure is given in the log message. A SERVFAIL response
+is sent back. The most likely cause of this is an error in the data
+source implementation; it is either creating bad responses or raising
+exceptions itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_FAILURE_UNKNOWN">
+<term>AUTH_RESPONSE_FAILURE_UNKNOWN unknown exception while building response to query</term>
+<listitem><para>
+This debug message is similar to AUTH_RESPONSE_FAILURE, but further
+details about the error are unknown, because it was signaled by something
+which is not an exception. This is definitely a bug.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="AUTH_RESPONSE_RECEIVED">
<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
<listitem><para>
@@ -1618,6 +1639,15 @@ configuration is not stored.
</para></listitem>
</varlistentry>
+<varlistentry id="CFGMGR_RENAMED_CONFIG_FILE">
+<term>CFGMGR_RENAMED_CONFIG_FILE renamed configuration file %1 to %2, will create new %1</term>
+<listitem><para>
+BIND 10 has been started with the command to clear the configuration file.
+The existing file is backed up to the given file name, so that data is not
+immediately lost if this was done by accident.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
<listitem><para>
@@ -2943,8 +2973,10 @@ not have any DS record. This indicates problem with the provided data.
<varlistentry id="DATASRC_QUERY_NO_ZONE">
<term>DATASRC_QUERY_NO_ZONE no zone containing '%1' in class '%2'</term>
<listitem><para>
-Lookup of domain failed because the data have no zone that contain the
-domain. Maybe someone sent a query to the wrong server for some reason.
+Debug information. Lookup of domain failed because the datasource
+has no zone that contains the domain. Maybe someone sent a query
+to the wrong server for some reason. This may also happen when
+looking in the datasource for addresses for NS records.
</para></listitem>
</varlistentry>
@@ -3281,6 +3313,200 @@ generated.
</para></listitem>
</varlistentry>
+<varlistentry id="DBUTIL_BACKUP">
+<term>DBUTIL_BACKUP created backup of %1 in %2</term>
+<listitem><para>
+A backup of the given database file was created. The names of the original
+file and the backup are given in the output message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_CHECK_ERROR">
+<term>DBUTIL_CHECK_ERROR unable to check database version: %1</term>
+<listitem><para>
+There was an error while trying to check the current version of the database
+schema. The error is shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_CHECK_NOCONFIRM">
+<term>DBUTIL_CHECK_NOCONFIRM --noconfirm is not compatible with --check</term>
+<listitem><para>
+b10-dbutil was called with --check and --noconfirm. --noconfirm only has
+meaning with --upgrade, so this is considered an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_CHECK_OK">
+<term>DBUTIL_CHECK_OK this is the latest version of the database schema. No upgrade is required</term>
+<listitem><para>
+The database schema version has been checked, and is up to date.
+No action is required.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_CHECK_UPGRADE_NEEDED">
+<term>DBUTIL_CHECK_UPGRADE_NEEDED re-run this program with the --upgrade switch to upgrade</term>
+<listitem><para>
+The database schema version is not up to date, and an update is required.
+Please run the dbutil tool again, with the --upgrade argument.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_COMMAND_NONE">
+<term>DBUTIL_COMMAND_NONE must select one of --check or --upgrade</term>
+<listitem><para>
+b10-dbutil was called with neither --check nor --upgrade. One action must be
+provided.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_COMMAND_UPGRADE_CHECK">
+<term>DBUTIL_COMMAND_UPGRADE_CHECK --upgrade is not compatible with --check</term>
+<listitem><para>
+b10-dbutil was called with both the commands --upgrade and --check. Only one
+action can be performed at a time.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_DATABASE_MAY_BE_CORRUPT">
+<term>DBUTIL_DATABASE_MAY_BE_CORRUPT database file %1 may be corrupt, restore it from backup (%2)</term>
+<listitem><para>
+The upgrade failed while it was in progress; the database may now be in an
+inconsistent state, and it is advised to restore it from the backup that was
+created when b10-dbutil started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_EXECUTE">
+<term>DBUTIL_EXECUTE Executing SQL statement: %1</term>
+<listitem><para>
+Debug message; the given SQL statement is executed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_FILE">
+<term>DBUTIL_FILE Database file: %1</term>
+<listitem><para>
+The database file that is being checked.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_NO_FILE">
+<term>DBUTIL_NO_FILE must supply name of the database file to upgrade</term>
+<listitem><para>
+b10-dbutil was called without a database file. Currently, it cannot find this
+file on its own, and it must be provided.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_STATEMENT_ERROR">
+<term>DBUTIL_STATEMENT_ERROR failed to execute %1: %2</term>
+<listitem><para>
+The given database statement failed to execute. The error is shown in the
+message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_TOO_MANY_ARGUMENTS">
+<term>DBUTIL_TOO_MANY_ARGUMENTS too many arguments to the command, maximum of one expected</term>
+<listitem><para>
+There were too many command-line arguments to b10-dbutil.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_CANCELED">
+<term>DBUTIL_UPGRADE_CANCELED upgrade canceled; database has not been changed</term>
+<listitem><para>
+The user aborted the upgrade, and b10-dbutil will now exit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_DBUTIL">
+<term>DBUTIL_UPGRADE_DBUTIL please get the latest version of b10-dbutil and re-run</term>
+<listitem><para>
+A database schema was found that is newer than this version of b10-dbutil
+supports; the utility is apparently out of date and should itself be upgraded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_FAILED">
+<term>DBUTIL_UPGRADE_FAILED upgrade failed: %1</term>
+<listitem><para>
+While the upgrade was in progress, an unexpected error occurred. The error
+is shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_NOT_ATTEMPTED">
+<term>DBUTIL_UPGRADE_NOT_ATTEMPTED database upgrade was not attempted</term>
+<listitem><para>
+Due to the earlier failure, the database schema upgrade was not attempted,
+and b10-dbutil will now exit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_NOT_NEEDED">
+<term>DBUTIL_UPGRADE_NOT_NEEDED database already at latest version, no upgrade necessary</term>
+<listitem><para>
+b10-dbutil was told to upgrade the database schema, but it is already at the
+latest version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_NOT_POSSIBLE">
+<term>DBUTIL_UPGRADE_NOT_POSSIBLE database at a later version than this utility can support</term>
+<listitem><para>
+b10-dbutil was told to upgrade the database schema, but it is at a higher
+version than this tool currently supports. Please update b10-dbutil and try
+again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_PREPARATION_FAILED">
+<term>DBUTIL_UPGRADE_PREPARATION_FAILED upgrade preparation failed: %1</term>
+<listitem><para>
+An unexpected error occurred while b10-dbutil was preparing to upgrade the
+database schema. The error is shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADE_SUCCESFUL">
+<term>DBUTIL_UPGRADE_SUCCESFUL database upgrade successfully completed</term>
+<listitem><para>
+The database schema update was completed successfully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_UPGRADING">
+<term>DBUTIL_UPGRADING upgrading database from %1 to %2</term>
+<listitem><para>
+An upgrade is in progress; the versions involved in the current upgrade
+step are shown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_VERSION_CURRENT">
+<term>DBUTIL_VERSION_CURRENT database version %1</term>
+<listitem><para>
+The current version of the database schema.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_VERSION_HIGH">
+<term>DBUTIL_VERSION_HIGH database is at a later version (%1) than this program can cope with (%2)</term>
+<listitem><para>
+The database schema is at a higher version than b10-dbutil knows about.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DBUTIL_VERSION_LOW">
+<term>DBUTIL_VERSION_LOW database version %1, latest version is %2.</term>
+<listitem><para>
+The database schema is not up to date; the current version and the latest
+version are given in the message.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="DDNS_ACCEPT_FAILURE">
<term>DDNS_ACCEPT_FAILURE error accepting a connection: %1</term>
<listitem><para>
@@ -4686,8 +4912,8 @@ This informational message is output when the resolver has shut down.
</para></listitem>
</varlistentry>
-<varlistentry id="RESOLVER_SHUTDOWN (1)">
-<term>RESOLVER_SHUTDOWN (1) asked to shut down, doing so</term>
+<varlistentry id="RESOLVER_SHUTDOWN_RECEIVED">
+<term>RESOLVER_SHUTDOWN_RECEIVED received command to shut down</term>
<listitem><para>
A debug message noting that the server was asked to terminate and is
complying to the request.
@@ -5264,6 +5490,35 @@ likely cause is a PYTHONPATH problem.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_IXFR_TRANSFER_SUCCESS">
+<term>XFRIN_IXFR_TRANSFER_SUCCESS incremental IXFR transfer of zone %1 succeeded (messages: %2, changesets: %3, deletions: %4, additions: %5, bytes: %6, run time: %7 seconds, %8 bytes/second)</term>
+<listitem><para>
+The IXFR transfer for the given zone was successful.
+The provided information contains the following values:
+</para><para>
+messages: Number of overhead DNS messages in the transfer.
+</para><para>
+changesets: Number of difference sequences.
+</para><para>
+deletions: Number of Resource Records deleted by all the changesets combined,
+including the SOA records.
+</para><para>
+additions: Number of Resource Records added by all the changesets combined,
+including the SOA records.
+</para><para>
+bytes: Full size of the transfer data on the wire.
+</para><para>
+run time: Time (in seconds) the complete IXFR took.
+</para><para>
+bytes/second: Transfer speed.
+</para><para>
+Note that there is no cross-checking of additions and deletions; if the same
+RR gets added and deleted in multiple changesets, it is counted each time;
+therefore, each changeset should contain at least one deletion and one
+addition (the updated SOA record).
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_IXFR_UPTODATE">
<term>XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating</term>
<listitem><para>
@@ -5330,6 +5585,25 @@ daemon will now shut down.
</para></listitem>
</varlistentry>
+<varlistentry id="XFRIN_TRANSFER_SUCCESS">
+<term>XFRIN_TRANSFER_SUCCESS full %1 transfer of zone %2 succeeded (messages: %3, records: %4, bytes: %5, run time: %6 seconds, %7 bytes/second)</term>
+<listitem><para>
+The AXFR transfer of the given zone was successful.
+The provided information contains the following values:
+</para><para>
+messages: Number of overhead DNS messages in the transfer.
+</para><para>
+records: Number of Resource Records in the full transfer, excluding the
+final SOA record that marks the end of the AXFR.
+</para><para>
+bytes: Full size of the transfer data on the wire.
+</para><para>
+run time: Time (in seconds) the complete AXFR took.
+</para><para>
+bytes/second: Transfer speed.
+</para></listitem>
+</varlistentry>
+
<varlistentry id="XFRIN_UNKNOWN_ERROR">
<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
<listitem><para>
@@ -5406,13 +5680,6 @@ the SOA record has been checked, and a zone transfer has been started.
</para></listitem>
</varlistentry>
-<varlistentry id="XFRIN_XFR_TRANSFER_SUCCESS">
-<term>XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</term>
-<listitem><para>
-The XFR transfer of the given zone was successfully completed.
-</para></listitem>
-</varlistentry>
-
<varlistentry id="XFRIN_ZONE_CREATED">
<term>XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created</term>
<listitem><para>
@@ -6012,9 +6279,11 @@ a bug report.
<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
<listitem><para>
An XFRIN operation has failed but the zone that was the subject of the
-operation is not being managed by the zone manager. This may indicate
-an error in the program (as the operation should not have been initiated
-if this were the case). Please submit a bug report.
+operation is not being managed by the zone manager. This can be either the
+result of a bindctl command to transfer in a currently unknown (or mistyped)
+zone, or, if this error appears without the administrator giving transfer
+commands, it can indicate an error in the program, as it should not have
+initiated transfers of unknown zones on its own.
</para></listitem>
</varlistentry>
diff --git a/ext/LICENSE_1_0.txt b/ext/LICENSE_1_0.txt
new file mode 100644
index 0000000..36b7cd9
--- /dev/null
+++ b/ext/LICENSE_1_0.txt
@@ -0,0 +1,23 @@
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/src/bin/Makefile.am b/src/bin/Makefile.am
index 7c6cdb8..499e209 100644
--- a/src/bin/Makefile.am
+++ b/src/bin/Makefile.am
@@ -1,4 +1,4 @@
SUBDIRS = bind10 bindctl cfgmgr ddns loadzone msgq host cmdctl auth xfrin \
- xfrout usermgr zonemgr stats tests resolver sockcreator dhcp4 dhcp6
+ xfrout usermgr zonemgr stats tests resolver sockcreator dhcp4 dhcp6 dbutil
check-recursive: all-recursive
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index 97b0e79..3eeb35e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -47,6 +47,10 @@
"item_type": "string",
"item_optional": false,
"item_default": ""
+ },
+ { "item_name": "filetype",
+ "item_type": "string",
+ "item_optional": true
}]
}
}]
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index 2ae520c..3a04dc8 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -12,14 +12,6 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <set>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include <boost/foreach.hpp>
-#include <boost/shared_ptr.hpp>
-
#include <dns/name.h>
#include <dns/rrclass.h>
@@ -27,6 +19,7 @@
#include <datasrc/memory_datasrc.h>
#include <datasrc/zonetable.h>
+#include <datasrc/factory.h>
#include <auth/auth_srv.h>
#include <auth/auth_config.h>
@@ -34,6 +27,15 @@
#include <server_common/portconfig.h>
+#include <boost/foreach.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/scoped_ptr.hpp>
+
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
using namespace std;
using namespace isc::dns;
using namespace isc::data;
@@ -155,17 +157,48 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
BOOST_FOREACH(ConstElementPtr zone_config, zones_config->listValue()) {
ConstElementPtr origin = zone_config->get("origin");
- if (!origin) {
+ const string origin_txt = origin ? origin->stringValue() : "";
+ if (origin_txt.empty()) {
isc_throw(AuthConfigError, "Missing zone origin");
}
ConstElementPtr file = zone_config->get("file");
- if (!file) {
+ const string file_txt = file ? file->stringValue() : "";
+ if (file_txt.empty()) {
isc_throw(AuthConfigError, "Missing zone file for zone: "
- << origin->str());
+ << origin_txt);
}
- boost::shared_ptr<InMemoryZoneFinder> zone_finder(new
- InMemoryZoneFinder(rrclass_,
- Name(origin->stringValue())));
+
+ // We support the traditional text type and SQLite3 backend. For the
+ // latter we create a client for the underlying SQLite3 data source,
+ // and build the in-memory zone using an iterator of the underlying
+ // zone.
+ ConstElementPtr filetype = zone_config->get("filetype");
+ const string filetype_txt = filetype ? filetype->stringValue() :
+ "text";
+ boost::scoped_ptr<DataSourceClientContainer> container;
+ if (filetype_txt == "sqlite3") {
+ container.reset(new DataSourceClientContainer(
+ "sqlite3",
+ Element::fromJSON("{\"database_file\": \"" +
+ file_txt + "\"}")));
+ } else if (filetype_txt != "text") {
+ isc_throw(AuthConfigError, "Invalid filetype for zone "
+ << origin_txt << ": " << filetype_txt);
+ }
+
+ // Note: we don't want to have such small try-catch blocks for each
+ // specific error. We may eventually want to introduce some unified
+ // error handling framework as we have more configuration parameters.
+ // See bug #1627 for the relevant discussion.
+ InMemoryZoneFinder* imzf = NULL;
+ try {
+ imzf = new InMemoryZoneFinder(rrclass_, Name(origin_txt));
+ } catch (const isc::dns::NameParserException& ex) {
+ isc_throw(AuthConfigError, "unable to parse zone's origin: " <<
+ ex.what());
+ }
+
+ boost::shared_ptr<InMemoryZoneFinder> zone_finder(imzf);
const result::Result result = memory_client_->addZone(zone_finder);
if (result == result::EXIST) {
isc_throw(AuthConfigError, "zone "<< origin->str()
@@ -178,7 +211,12 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
* need the load method to be split into some kind of build and
* commit/abort parts.
*/
- zone_finder->load(file->stringValue());
+ if (filetype_txt == "text") {
+ zone_finder->load(file_txt);
+ } else {
+ zone_finder->load(*container->getInstance().getIterator(
+ Name(origin_txt)));
+ }
}
}
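
A rough sketch of the configuration this change accepts follows; the origin
and database path are examples only (the path matches the default SQLite3
file mentioned in the guide), and the structure mirrors the JSON used in the
unit tests below:

import json

# Hedged example: an Auth "datasources" entry for an in-memory data source
# whose zone data is built from an SQLite3 file via the new "filetype"
# setting; "text" (the default) keeps the old zone-file behaviour.
datasources = [{
    "type": "memory",
    "class": "IN",
    "zones": [{
        "origin": "example.org",
        "file": "/usr/local/var/bind10-devel/zone.sqlite3",  # example path
        "filetype": "sqlite3",
    }],
}]
print(json.dumps(datasources, indent=2))
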
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 14ba2ae..a5ef4fb 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
.\" Title: b10-auth
.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 1, 2012
+.\" Date: March 28, 2012
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-AUTH" "8" "March 1, 2012" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "March 28, 2012" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -64,7 +64,7 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
\fIdatasources\fR
configures data sources\&. The list items include:
\fItype\fR
-to optionally choose the data source type (such as
+to define the required data source type (such as
\(lqmemory\(rq);
\fIclass\fR
to optionally select the class (it defaults to
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 7575217..7f3a492 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 1, 2012</date>
+ <date>March 28, 2012</date>
</refentryinfo>
<refmeta>
@@ -119,7 +119,7 @@
<para>
<varname>datasources</varname> configures data sources.
The list items include:
- <varname>type</varname> to optionally choose the data source type
+ <varname>type</varname> to define the required data source type
(such as <quote>memory</quote>);
<varname>class</varname> to optionally select the class
(it defaults to <quote>IN</quote>);
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 793a64f..f215c04 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -278,8 +278,10 @@ Query::addDS(ZoneFinder& finder, const Name& dname) {
ds_context->isNSEC3Signed()) {
// Add no DS proof with NSEC3 as specified in RFC 5155 Section 7.2.7.
addClosestEncloserProof(finder, dname, true);
- } else {
- // Any other case should be an error
+ } else if (ds_context->code != ZoneFinder::NXRRSET) {
+ // We know this domain should exist, so the result must be NXRRSET.
+ // If not, the zone is broken, so we'll return SERVFAIL by triggering
+ // an exception.
isc_throw(BadDS, "Unexpected result for DS lookup for delegation");
}
}
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index d24ba89..b33c7af 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -3,6 +3,7 @@ AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/bin
AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DAUTH_OBJ_DIR=\"$(abs_top_builddir)/src/bin/auth\"
AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/lib/testutils/testdata\"
AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
@@ -11,6 +12,9 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
if USE_STATIC_LINK
AM_LDFLAGS = -static
+# Some test cases cannot work with static link. To selectively disable such
+# tests we signal it via a definition.
+AM_CPPFLAGS += -DUSE_STATIC_LINK=1
endif
CLEANFILES = *.gcno *.gcda
@@ -28,8 +32,10 @@ run_unittests_SOURCES += ../auth_config.h ../auth_config.cc
run_unittests_SOURCES += ../command.h ../command.cc
run_unittests_SOURCES += ../common.h ../common.cc
run_unittests_SOURCES += ../statistics.h ../statistics.cc
+run_unittests_SOURCES += datasrc_util.h datasrc_util.cc
run_unittests_SOURCES += auth_srv_unittest.cc
run_unittests_SOURCES += config_unittest.cc
+run_unittests_SOURCES += config_syntax_unittest.cc
run_unittests_SOURCES += command_unittest.cc
run_unittests_SOURCES += common_unittest.cc
run_unittests_SOURCES += query_unittest.cc
diff --git a/src/bin/auth/tests/config_syntax_unittest.cc b/src/bin/auth/tests/config_syntax_unittest.cc
new file mode 100644
index 0000000..8caedfd
--- /dev/null
+++ b/src/bin/auth/tests/config_syntax_unittest.cc
@@ -0,0 +1,71 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <cc/data.h>
+#include <config/module_spec.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::data;
+using namespace isc::config;
+
+namespace {
+
+const char* const SPEC_FILE = AUTH_OBJ_DIR "/auth.spec";
+
+class AuthConfigSyntaxTest : public ::testing::Test {
+protected:
+ AuthConfigSyntaxTest() : mspec_(moduleSpecFromFile(SPEC_FILE))
+ {}
+ ModuleSpec mspec_;
+};
+
+TEST_F(AuthConfigSyntaxTest, inmemoryDefaultFileType) {
+ // filetype is optional
+ EXPECT_TRUE(
+ mspec_.validateConfig(
+ Element::fromJSON(
+ "{\"listen_on\": [], \"datasources\": "
+ " [{\"type\": \"memory\", \"class\": \"IN\", "
+ " \"zones\": [{\"origin\": \"example.com\","
+ " \"file\": \""
+ TEST_DATA_DIR "/example.zone\"}]}]}"), true));
+}
+
+TEST_F(AuthConfigSyntaxTest, inmemorySQLite3Backend) {
+ // Specifying non-default in-memory filetype
+ EXPECT_TRUE(
+ mspec_.validateConfig(
+ Element::fromJSON(
+ "{\"datasources\": "
+ " [{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.com\","
+ " \"file\": \""
+ TEST_DATA_DIR "/example.zone\","
+ " \"filetype\": \"sqlite3\"}]}]}"), false));
+}
+
+TEST_F(AuthConfigSyntaxTest, badInmemoryFileType) {
+ // filetype must be a string
+ EXPECT_FALSE(
+ mspec_.validateConfig(
+ Element::fromJSON(
+ "{\"datasources\": "
+ " [{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.com\","
+ " \"file\": \""
+ TEST_DATA_DIR "/example.zone\","
+ " \"filetype\": 42}]}]}"), false));
+}
+}
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index fb5067e..d471a53 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -21,6 +21,7 @@
#include <cc/data.h>
+#include <datasrc/data_source.h>
#include <datasrc/memory_datasrc.h>
#include <xfr/xfrout_client.h>
@@ -29,14 +30,20 @@
#include <auth/auth_config.h>
#include <auth/common.h>
+#include "datasrc_util.h"
+
#include <testutils/mockups.h>
#include <testutils/portconfig.h>
#include <testutils/socket_request.h>
+#include <sstream>
+
+using namespace std;
using namespace isc::dns;
using namespace isc::data;
using namespace isc::datasrc;
using namespace isc::asiodns;
+using namespace isc::auth::unittest;
using namespace isc::testutils;
namespace {
@@ -201,6 +208,55 @@ TEST_F(MemoryDatasrcConfigTest, addOneZone) {
RRType::A())->code);
}
+// This test uses dynamic load of a data source module, and won't work when
+// statically linked.
+TEST_F(MemoryDatasrcConfigTest,
+#ifdef USE_STATIC_LINK
+ DISABLED_addOneWithFiletypeSQLite3
+#else
+ addOneWithFiletypeSQLite3
+#endif
+ )
+{
+ const string test_db = TEST_DATA_BUILDDIR "/auth_test.sqlite3.copied";
+ stringstream ss("example.org. 3600 IN SOA . . 0 0 0 0 0\n");
+ createSQLite3DB(rrclass, Name("example.org"), test_db.c_str(), ss);
+
+ // In-memory with an SQLite3 data source as the backend.
+ parser->build(Element::fromJSON(
+ "[{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.org\","
+ " \"file\": \""
+ + test_db + "\","
+ " \"filetype\": \"sqlite3\"}]}]"));
+ parser->commit();
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
+
+ // Failure case: the specified zone doesn't exist in the DB file.
+ delete parser;
+ parser = createAuthConfigParser(server, "datasources");
+ EXPECT_THROW(parser->build(
+ Element::fromJSON(
+ "[{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.com\","
+ " \"file\": \""
+ + test_db + "\","
+ " \"filetype\": \"sqlite3\"}]}]")),
+ DataSourceError);
+}
+
+TEST_F(MemoryDatasrcConfigTest, addOneWithFiletypeText) {
+ // Explicitly specifying "text" is okay.
+ parser->build(Element::fromJSON(
+ "[{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.com\","
+ " \"file\": \""
+ TEST_DATA_DIR "/example.zone\","
+ " \"filetype\": \"text\"}]}]"));
+ parser->commit();
+ EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
+}
+
TEST_F(MemoryDatasrcConfigTest, addMultiZones) {
EXPECT_NO_THROW(parser->build(Element::fromJSON(
"[{\"type\": \"memory\","
@@ -299,7 +355,7 @@ TEST_F(MemoryDatasrcConfigTest, remove) {
EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
}
-TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
+TEST_F(MemoryDatasrcConfigTest, addDuplicateZones) {
EXPECT_THROW(parser->build(
Element::fromJSON(
"[{\"type\": \"memory\","
@@ -313,6 +369,13 @@ TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
}
TEST_F(MemoryDatasrcConfigTest, addBadZone) {
+ // origin and file are missing
+ EXPECT_THROW(parser->build(
+ Element::fromJSON(
+ "[{\"type\": \"memory\","
+ " \"zones\": [{}]}]")),
+ AuthConfigError);
+
// origin is missing
EXPECT_THROW(parser->build(
Element::fromJSON(
@@ -320,6 +383,13 @@ TEST_F(MemoryDatasrcConfigTest, addBadZone) {
" \"zones\": [{\"file\": \"example.zone\"}]}]")),
AuthConfigError);
+ // file is missing
+ EXPECT_THROW(parser->build(
+ Element::fromJSON(
+ "[{\"type\": \"memory\","
+ " \"zones\": [{\"origin\": \"example.com\"}]}]")),
+ AuthConfigError);
+
// missing zone file
EXPECT_THROW(parser->build(
Element::fromJSON(
@@ -332,7 +402,7 @@ TEST_F(MemoryDatasrcConfigTest, addBadZone) {
"[{\"type\": \"memory\","
" \"zones\": [{\"origin\": \"example..com\","
" \"file\": \"example.zone\"}]}]")),
- EmptyLabel);
+ AuthConfigError);
// bogus RR class name
EXPECT_THROW(parser->build(
diff --git a/src/bin/auth/tests/datasrc_util.cc b/src/bin/auth/tests/datasrc_util.cc
new file mode 100644
index 0000000..d9e99b6
--- /dev/null
+++ b/src/bin/auth/tests/datasrc_util.cc
@@ -0,0 +1,77 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <exceptions/exceptions.h>
+
+#include <dns/masterload.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+
+#include <cc/data.h>
+
+#include <datasrc/client.h>
+#include <datasrc/zone.h>
+#include <datasrc/factory.h>
+
+#include "datasrc_util.h"
+
+#include <boost/bind.hpp>
+
+#include <istream>
+
+#include <cstdlib>
+
+using namespace std;
+
+using namespace isc::dns;
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace isc {
+namespace auth {
+namespace unittest {
+
+namespace {
+void
+addRRset(ZoneUpdaterPtr updater, ConstRRsetPtr rrset) {
+ updater->addRRset(*rrset);
+}
+}
+
+void
+createSQLite3DB(RRClass zclass, const Name& zname,
+ const char* const db_file, istream& rr_stream)
+{
+ // We always begin with an empty template SQLite3 DB file and install
+ // the zone data from the zone file.
+ const char* const install_cmd_prefix = INSTALL_PROG " " TEST_DATA_DIR
+ "/rwtest.sqlite3 ";
+ const string install_cmd = string(install_cmd_prefix) + db_file;
+ if (system(install_cmd.c_str()) != 0) {
+ isc_throw(isc::Unexpected,
+ "Error setting up; command failed: " << install_cmd);
+ }
+
+ DataSourceClientContainer container("sqlite3",
+ Element::fromJSON(
+ "{\"database_file\": \"" +
+ string(db_file) + "\"}"));
+ ZoneUpdaterPtr updater = container.getInstance().getUpdater(zname, true);
+ masterLoad(rr_stream, zname, zclass, boost::bind(addRRset, updater, _1));
+ updater->commit();
+}
+
+} // end of unittest
+} // end of auth
+} // end of isc
diff --git a/src/bin/auth/tests/datasrc_util.h b/src/bin/auth/tests/datasrc_util.h
new file mode 100644
index 0000000..fc4109b
--- /dev/null
+++ b/src/bin/auth/tests/datasrc_util.h
@@ -0,0 +1,61 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __AUTH_DATA_SOURCE_UTIL_H
+#define __AUTH_DATA_SOURCE_UTIL_H 1
+
+#include <dns/name.h>
+#include <dns/rrclass.h>
+
+#include <istream>
+
+namespace isc {
+namespace auth {
+namespace unittest {
+
+// Here we define utility modules for the convenience of tests that create
+// a data source client according to the specified conditions.
+
+/// \brief Create an SQLite3 data source client from a stream.
+///
+/// This function creates an SQLite3 DB file for the specified zone
+/// with specified content. The zone will be created in the given
+/// SQLite3 database file. The database file does not have to exist;
+/// this function will automatically create a new file for the test
+/// based on a template that only contains the necessary schema. If
+/// the given file already exists, this function overwrites its content
+/// (so the file should be an ephemeral one used only for that test
+/// case).
+///
+/// The input stream must produce strings as the corresponding
+/// \c dns::masterLoad() function expects.
+///
+/// \param zclass The RR class of the zone
+/// \param zname The origin name of the zone
+/// \param db_file The SQLite3 data base file in which the zone data should be
+/// installed.
+/// \param rr_stream An input stream that produces zone data.
+void
+createSQLite3DB(dns::RRClass zclass, const dns::Name& zname,
+ const char* const db_file, std::istream& rr_stream);
+
+} // end of unittest
+} // end of auth
+} // end of isc
+
+#endif // __AUTH_DATA_SOURCE_UTIL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index fea9cd0..63429ae 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -239,6 +239,10 @@ const char* const unsigned_delegation_optout_nsec_txt =
const char* const bad_delegation_txt =
"bad-delegation.example.com. 3600 IN NS ns.example.net.\n";
+// Delegation from an unsigned parent. There's no DS, and there's no NSEC
+// or NSEC3 that proves it.
+const char* const nosec_delegation_txt =
+ "nosec-delegation.example.com. 3600 IN NS ns.nosec.example.net.\n";
// A helper function that generates a textual representation of RRSIG RDATA
// for the given covered type. The resulting RRSIG may not necessarily make
@@ -314,7 +318,7 @@ public:
unsigned_delegation_txt << unsigned_delegation_nsec_txt <<
unsigned_delegation_optout_txt <<
unsigned_delegation_optout_nsec_txt <<
- bad_delegation_txt;
+ bad_delegation_txt << nosec_delegation_txt;
masterLoad(zone_stream, origin_, rrclass_,
boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -715,8 +719,9 @@ MockZoneFinder::find(const Name& name, const RRType& type,
RESULT_NSEC_SIGNED));
}
}
- return (createContext(options, NXRRSET, RRsetPtr(),
- RESULT_NSEC_SIGNED));
+ // If no NSEC is found or DNSSEC isn't specified, behave as if the
+ // zone is unsigned.
+ return (createContext(options, NXRRSET, RRsetPtr()));
}
// query name isn't found in our domains.
@@ -1097,6 +1102,17 @@ TEST_F(QueryTest, delegation) {
NULL, delegation_txt, ns_addrs_txt);
}
+TEST_F(QueryTest, delegationWithDNSSEC) {
+    // Similar to the previous one, but with DNSSEC requested.
+    // In this case the parent zone would behave as unsigned, so the result
+    // should be just like a non-DNSSEC delegation.
+ query.process(memory_client, Name("www.nosec-delegation.example.com"),
+ qtype, response, true);
+
+ responseCheck(response, Rcode::NOERROR(), 0, 0, 1, 0,
+ NULL, nosec_delegation_txt, NULL);
+}
+
TEST_F(QueryTest, secureDelegation) {
EXPECT_NO_THROW(query.process(memory_client,
Name("foo.signed-delegation.example.com"),
diff --git a/src/bin/auth/tests/testdata/example.sqlite3 b/src/bin/auth/tests/testdata/example.sqlite3
index e8e255b..0f6ee02 100644
Binary files a/src/bin/auth/tests/testdata/example.sqlite3 and b/src/bin/auth/tests/testdata/example.sqlite3 differ
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index 11aee7a..2dafaab 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -22,7 +22,7 @@
bind10 \- BIND 10 boss process
.SH "SYNOPSIS"
.HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-i\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-no\-kill\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
.SH "DESCRIPTION"
.PP
The
@@ -69,6 +69,13 @@ Disables the hot\-spot caching used by the
daemon\&.
.RE
.PP
+\fB\-i\fR, \fB\-\-no\-kill\fR
+.RS 4
+When this option is passed,
+\fBbind10\fR
+does not send SIGTERM and SIGKILL signals to modules during shutdown\&. (This option was introduced for use during testing\&.)
+.RE
+.PP
\fB\-u\fR \fIuser\fR, \fB\-\-user\fR \fIname\fR
.RS 4
The username for
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1f3cb68..9a8f2fe 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -45,6 +45,7 @@
<cmdsynopsis>
<command>bind10</command>
<arg><option>-c <replaceable>config-filename</replaceable></option></arg>
+ <arg><option>-i</option></arg>
<arg><option>-m <replaceable>file</replaceable></option></arg>
<arg><option>-n</option></arg>
<arg><option>-p <replaceable>data_path</replaceable></option></arg>
@@ -56,6 +57,7 @@
<arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
<arg><option>--msgq-socket-file <replaceable>file</replaceable></option></arg>
<arg><option>--no-cache</option></arg>
+ <arg><option>--no-kill</option></arg>
<arg><option>--pid-file</option> <replaceable>filename</replaceable></arg>
<arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
<arg><option>--user <replaceable>user</replaceable></option></arg>
@@ -156,6 +158,16 @@
</varlistentry>
<varlistentry>
+ <term><option>-i</option>, <option>--no-kill</option></term>
+ <listitem>
+ <para>When this option is passed, <command>bind10</command>
+ does not send SIGTERM and SIGKILL signals to modules during
+ shutdown. (This option was introduced for use during
+ testing.)</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>-u</option> <replaceable>user</replaceable>, <option>--user</option> <replaceable>name</replaceable></term>
<!-- TODO: example more detail. -->
<listitem>
@@ -232,10 +244,6 @@ TODO: configuration section
<itemizedlist>
<listitem>
- <para> <varname>/Boss/components/b10-auth</varname> </para>
- </listitem>
-
- <listitem>
<para> <varname>/Boss/components/b10-cmdctl</varname> </para>
</listitem>
@@ -247,18 +255,6 @@ TODO: configuration section
<para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
</listitem>
- <listitem>
- <para> <varname>/Boss/components/b10-xfrin</varname> </para>
- </listitem>
-
- <listitem>
- <para> <varname>/Boss/components/b10-xfrout</varname> </para>
- </listitem>
-
- <listitem>
- <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
- </listitem>
-
</itemizedlist>
<para>
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index edc1b69..37b845d 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -167,8 +167,9 @@ class BoB:
"""Boss of BIND class."""
def __init__(self, msgq_socket_file=None, data_path=None,
- config_filename=None, nocache=False, verbose=False, setuid=None,
- username=None, cmdctl_port=None, wait_time=10):
+ config_filename=None, clear_config=False, nocache=False,
+ verbose=False, nokill=False, setuid=None, username=None,
+ cmdctl_port=None, wait_time=10):
"""
Initialize the Boss of BIND. This is a singleton (only one can run).
@@ -208,8 +209,10 @@ class BoB:
self.uid = setuid
self.username = username
self.verbose = verbose
+ self.nokill = nokill
self.data_path = data_path
self.config_filename = config_filename
+ self.clear_config = clear_config
self.cmdctl_port = cmdctl_port
self.wait_time = wait_time
self._component_configurator = isc.bind10.component.Configurator(self,
@@ -465,6 +468,8 @@ class BoB:
args.append("--data-path=" + self.data_path)
if self.config_filename is not None:
args.append("--config-filename=" + self.config_filename)
+ if self.clear_config:
+ args.append("--clear-config")
bind_cfgd = ProcessInfo("b10-cfgmgr", args,
self.c_channel_env)
bind_cfgd.spawn()
@@ -702,32 +707,36 @@ class BoB:
# still not enough.
time.sleep(1)
self.reap_children()
- # next try sending a SIGTERM
- components_to_stop = list(self.components.values())
- for component in components_to_stop:
- logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
- try:
- component.kill()
- except OSError:
- # ignore these (usually ESRCH because the child
- # finally exited)
- pass
- # finally, send SIGKILL (unmaskable termination) until everybody dies
- while self.components:
- # XXX: some delay probably useful... how much is uncertain
- time.sleep(0.1)
- self.reap_children()
+
+ # Send TERM and KILL signals to modules if we're not prevented
+ # from doing so
+ if not self.nokill:
+ # next try sending a SIGTERM
components_to_stop = list(self.components.values())
for component in components_to_stop:
- logger.info(BIND10_SEND_SIGKILL, component.name(),
- component.pid())
+ logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
try:
- component.kill(True)
+ component.kill()
except OSError:
# ignore these (usually ESRCH because the child
# finally exited)
pass
- logger.info(BIND10_SHUTDOWN_COMPLETE)
+ # finally, send SIGKILL (unmaskable termination) until everybody dies
+ while self.components:
+ # XXX: some delay probably useful... how much is uncertain
+ time.sleep(0.1)
+ self.reap_children()
+ components_to_stop = list(self.components.values())
+ for component in components_to_stop:
+ logger.info(BIND10_SEND_SIGKILL, component.name(),
+ component.pid())
+ try:
+ component.kill(True)
+ except OSError:
+ # ignore these (usually ESRCH because the child
+ # finally exited)
+ pass
+ logger.info(BIND10_SHUTDOWN_COMPLETE)
def _get_process_exit_status(self):
return os.waitpid(-1, os.WNOHANG)
@@ -1043,6 +1052,8 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
help="UNIX domain socket file the b10-msgq daemon will use")
parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
default=False, help="disable hot-spot cache in authoritative DNS server")
+ parser.add_option("-i", "--no-kill", action="store_true", dest="nokill",
+ default=False, help="do not send SIGTERM and SIGKILL signals to modules during shutdown")
parser.add_option("-u", "--user", dest="user", type="string", default=None,
help="Change user after startup (must run as root)")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
@@ -1053,6 +1064,10 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
parser.add_option("-c", "--config-file", action="store",
dest="config_file", default=None,
help="Configuration database filename")
+ parser.add_option("--clear-config", action="store_true",
+ dest="clear_config", default=False,
+ help="Create backup of the configuration file and " +
+ "start with a clean configuration")
parser.add_option("-p", "--data-path", dest="data_path",
help="Directory to search for configuration files",
default=None)
@@ -1165,9 +1180,10 @@ def main():
try:
# Go bob!
boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
- options.config_file, options.nocache,
- options.verbose, setuid, username,
- options.cmdctl_port, options.wait_time)
+ options.config_file, options.clear_config,
+ options.nocache, options.verbose, options.nokill,
+ setuid, username, options.cmdctl_port,
+ options.wait_time)
startup_result = boss_of_bind.startup()
if startup_result:
logger.fatal(BIND10_STARTUP_ERROR, startup_result)
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 29b1f40..b358f96 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -8,10 +8,6 @@
"item_type": "named_set",
"item_optional": false,
"item_default": {
- "b10-auth": { "special": "auth", "kind": "needed" },
- "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
- "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
- "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
"b10-stats": { "address": "Stats", "kind": "dispensable" },
"b10-stats-httpd": {
"address": "StatsHttpd",
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 882824d..84a9da9 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -1012,6 +1012,22 @@ class TestParseArgs(unittest.TestCase):
options = parse_args(['--config-file=config-file'], TestOptParser)
self.assertEqual('config-file', options.config_file)
+ def test_clear_config(self):
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.clear_config)
+ options = parse_args(['--clear-config'], TestOptParser)
+ self.assertEqual(True, options.clear_config)
+
+ def test_nokill(self):
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.nokill)
+ options = parse_args(['--no-kill'], TestOptParser)
+ self.assertEqual(True, options.nokill)
+ options = parse_args([], TestOptParser)
+ self.assertEqual(False, options.nokill)
+ options = parse_args(['-i'], TestOptParser)
+ self.assertEqual(True, options.nokill)
+
def test_cmdctl_port(self):
"""
Test it can parse the command control port.
@@ -1160,11 +1176,13 @@ class TestBossComponents(unittest.TestCase):
# We check somewhere else that the shutdown is actually called
# from there (the test_kills).
- def test_kills(self):
+ def __real_test_kill(self, nokill = False):
"""
- Test that the boss kills components which don't want to stop.
+ Helper function that does the actual kill functionality testing.
"""
bob = MockBob()
+ bob.nokill = nokill
+
killed = []
class ImmortalComponent:
"""
@@ -1194,11 +1212,33 @@ class TestBossComponents(unittest.TestCase):
bob.shutdown()
self.assertTrue(bob.ccs.stopped)
- self.assertEqual([False, True], killed)
+
+ # Here, killed is an array where False is added if SIGTERM
+ # should be sent, or True if SIGKILL should be sent, in order in
+ # which they're sent.
+ if nokill:
+ self.assertEqual([], killed)
+ else:
+ self.assertEqual([False, True], killed)
+
self.assertTrue(self.__called)
bob._component_configurator.shutdown = orig
+ def test_kills(self):
+ """
+ Test that the boss kills components which don't want to stop.
+ """
+ self.__real_test_kill()
+
+ def test_nokill(self):
+ """
+ Test that the boss *doesn't* kill components which don't want to
+ stop, when asked not to (by passing the --no-kill option which
+ sets bob.nokill to True).
+ """
+ self.__real_test_kill(True)
+
def test_component_shutdown(self):
"""
Test the component_shutdown sets all variables accordingly.
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 31a6bda..1ddb916 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -365,10 +365,20 @@ class TestConfigCommands(unittest.TestCase):
self.assertEqual((5, MultiConfigData.LOCAL),
self.tool.config_data.get_value("/foo/an_int"))
+ cmd = cmdparse.BindCmdParse("config unset identifier=\"foo/an_int\"")
+ self.tool.apply_config_cmd(cmd)
+
+ self.assertEqual((1, MultiConfigData.DEFAULT),
+ self.tool.config_data.get_value("/foo/an_int"))
+
# this should raise a NotFoundError
cmd = cmdparse.BindCmdParse("config set identifier=\"foo/bar\" value=\"[]\"")
self.assertRaises(isc.cc.data.DataNotFoundError, self.tool.apply_config_cmd, cmd)
+ cmd = cmdparse.BindCmdParse("config unset identifier=\"foo/bar\"")
+ self.assertRaises(isc.cc.data.DataNotFoundError,
+ self.tool.apply_config_cmd, cmd)
+
# this should raise a TypeError
cmd = cmdparse.BindCmdParse("config set identifier=\"foo/an_int\" value=\"[]\"")
self.assertRaises(isc.cc.data.DataTypeError, self.tool.apply_config_cmd, cmd)
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 2ccc430..760b6d8 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -49,6 +49,10 @@ def parse_options(args=sys.argv[1:], Parser=OptionParser):
help="Configuration database filename " +
"(default=" + DEFAULT_CONFIG_FILE + ")",
default=DEFAULT_CONFIG_FILE)
+ parser.add_option("--clear-config", action="store_true",
+ dest="clear_config", default=False,
+ help="Back up the configuration file and start with " +
+ "a clean one")
(options, args) = parser.parse_args(args)
if args:
parser.error("No non-option arguments allowed")
@@ -85,7 +89,8 @@ def main():
options = parse_options()
global cm
try:
- cm = ConfigManager(options.data_path, options.config_file)
+ cm = ConfigManager(options.data_path, options.config_file,
+ None, options.clear_config)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
cm.read_config()
diff --git a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
index ea5fc8b..ca91c9c 100644
--- a/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
+++ b/src/bin/cfgmgr/tests/b10-cfgmgr_test.py.in
@@ -24,12 +24,13 @@ import bind10_config
from isc.testutils.parse_args import OptsError, TestOptParser
class MyConfigManager:
- def __init__(self, path, filename):
+ def __init__(self, path, filename, session=None, rename_config_file=False):
self._path = path
self.read_config_called = False
self.notify_boss_called = False
self.run_called = False
self.write_config_called = False
+ self.rename_config_called = False
self.running = True
self.virtual_modules = []
@@ -45,6 +46,9 @@ class MyConfigManager:
def write_config(self):
self.write_config_called = True
+ def rename_config_file(self, ofile, nfile):
+ self.rename_config_called = True
+
def set_virtual_module(self, spec, function):
self.virtual_modules.append((spec, function))
@@ -90,6 +94,7 @@ class TestConfigManagerStartup(unittest.TestCase):
self.assertTrue(self.loaded_plugins)
# if there are no changes, config is not written
self.assertFalse(b.cm.write_config_called)
+ self.assertFalse(b.cm.rename_config_called)
self.assertTrue(b.cm.running)
b.signal_handler(None, None)
@@ -187,6 +192,14 @@ class TestParseArgs(unittest.TestCase):
self.assertRaises(OptsError, b.parse_options, ['--config-filename'],
TestOptParser)
+ def test_clear_config(self):
+ b = __import__("b10-cfgmgr")
+ parsed = b.parse_options([], TestOptParser)
+ self.assertFalse(parsed.clear_config)
+ parsed = b.parse_options(['--clear-config'], TestOptParser)
+ self.assertTrue(parsed.clear_config)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/bin/dbutil/.gitignore b/src/bin/dbutil/.gitignore
new file mode 100644
index 0000000..abb63d5
--- /dev/null
+++ b/src/bin/dbutil/.gitignore
@@ -0,0 +1,3 @@
+/b10-dbutil
+/dbutil.py
+/run_dbutil.sh
diff --git a/src/bin/dbutil/Makefile.am b/src/bin/dbutil/Makefile.am
new file mode 100644
index 0000000..e05055f
--- /dev/null
+++ b/src/bin/dbutil/Makefile.am
@@ -0,0 +1,39 @@
+SUBDIRS = . tests
+
+bin_SCRIPTS = b10-dbutil
+man_MANS = b10-dbutil.8
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = $(man_MANS) b10-dbutil.xml dbutil_messages.mes
+
+noinst_SCRIPTS = run_dbutil.sh
+
+CLEANFILES = b10-dbutil b10-dbutil.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.pyo
+
+if ENABLE_MAN
+
+b10-dbutil.8: b10-dbutil.xml
+ xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-dbutil.xml
+
+endif
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.py : dbutil_messages.mes
+ $(top_builddir)/src/lib/log/compiler/message \
+ -d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/dbutil_messages.mes
+
+b10-dbutil: dbutil.py $(PYTHON_LOGMSGPKG_DIR)/work/dbutil_messages.py
+ $(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
+ -e "s|@@SYSCONFDIR@@|@sysconfdir@|" \
+ -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" dbutil.py >$@
+ chmod a+x $@
+
+CLEANDIRS = __pycache__
+
+clean-local:
+ rm -rf $(CLEANDIRS)
diff --git a/src/bin/dbutil/b10-dbutil.8 b/src/bin/dbutil/b10-dbutil.8
new file mode 100644
index 0000000..437a69d
--- /dev/null
+++ b/src/bin/dbutil/b10-dbutil.8
@@ -0,0 +1,92 @@
+'\" t
+.\" Title: b10-dbutil
+.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author]
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\" Date: March 20, 2012
+.\" Manual: BIND10
+.\" Source: BIND10
+.\" Language: English
+.\"
+.TH "B10\-DBUTIL" "8" "March 20, 2012" "BIND10" "BIND10"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+b10-dbutil \- Zone Database Maintenance Utility
+.SH "SYNOPSIS"
+.HP \w'\fBb10\-dbutil\ \-\-check\fR\ 'u
+\fBb10\-dbutil \-\-check\fR [\-\-verbose] [\-\-quiet] [\fIdbfile\fR]
+.HP \w'\fBb10\-dbutil\ \-\-upgrade\fR\ 'u
+\fBb10\-dbutil \-\-upgrade\fR [\-\-noconfirm] [\-\-verbose] [\-\-quiet] [\fIdbfile\fR]
+.SH "DESCRIPTION"
+.PP
+The
+\fBb10\-dbutil\fR
+utility is a general administration utility for SQL databases\&. (Currently only SQLite is supported by BIND 10\&.) It can report the current version of the schema, and upgrade an existing database to the latest version of the schema\&.
+.PP
+
+\fBb10\-dbutil\fR
+operates in one of two modes, check mode or upgrade mode\&.
+.PP
+In check mode (\fBb10\-dbutil \-\-check\fR), the utility reads the version of the database schema from the database and prints it\&. It will tell you whether the schema is at the latest version supported by BIND 10\&. Exit status is 0 if the schema is at the correct version, 1 if the schema is at an older version, 2 if the schema is at a version not yet supported by this version of b10\-dbutil\&. Any higher value indicates an error during command\-line parsing or execution\&.
+.PP
+When the upgrade function is selected (\fBb10\-dbutil \-\-upgrade\fR), the utility takes a copy of the database, then upgrades it to the latest version of the schema\&. The contents of the database remain intact\&. (The backup file is a file in the same directory as the database file\&. It has the same name, with "\&.backup" appended to it\&. If a file of that name already exists, the file will have the suffix "\&.backup\-1"\&. If that exists, the file will be suffixed "\&.backup\-2", and so on)\&. Exit status is 0 if the upgrade is either successful or aborted by the user, and non\-zero if there is an error\&.
+.PP
+When upgrading the database, it is
+\fIstrongly\fR
+recommended that BIND 10 not be running while the upgrade is in progress\&.
+.SH "ARGUMENTS"
+.PP
+The arguments are as follows:
+.PP
+\fB\-\-check\fR
+.RS 4
+Selects the version check function, which reports the current version of the database\&. This is incompatible with the \-\-upgrade option\&.
+.RE
+.PP
+\fB\-\-noconfirm\fR
+.RS 4
+Only valid with \-\-upgrade, this disables the prompt\&. Normally the utility will print a warning that an upgrade is about to take place and request that you type "Yes" to continue\&. If this switch is given on the command line, no prompt will be issued: the utility will just perform the upgrade\&.
+.RE
+.PP
+\fB\-\-upgrade\fR
+.RS 4
+Selects the upgrade function, which upgrades the database to the latest version of the schema\&. This is incompatible with the \-\-check option\&.
+.sp
+The upgrade function will upgrade a BIND 10 database \- no matter how old the schema \- preserving all data\&. A backup file is created before the upgrade (with the same name as the database, but with "\&.backup" suffixed to it)\&. If the upgrade fails, this file can be copied back to restore the original database\&.
+.RE
+.PP
+\fB\-\-verbose\fR
+.RS 4
+Enable verbose mode\&. Each SQL command issued by the utility will be printed to stderr before it is executed\&.
+.RE
+.PP
+\fB\-\-quiet\fR
+.RS 4
+Enable quiet mode\&. No output is printed, except errors during command\-line argument parsing, or the user confirmation dialog\&.
+.RE
+.PP
+\fB\fIdbfile\fR\fR
+.RS 4
+Name of the database file to check or upgrade\&.
+.RE
+.SH "COPYRIGHT"
+.br
+Copyright \(co 2012 Internet Systems Consortium, Inc. ("ISC")
+.br
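The exit statuses described above lend themselves to scripting. A minimal sketch of a wrapper that branches on them - it assumes b10-dbutil is on the PATH and is not part of this commit:

    import subprocess
    import sys

    # Statuses per the manual page: 0 = schema current, 1 = older schema
    # (upgrade needed), 2 = schema newer than this b10-dbutil, higher
    # values = command-line or execution error.
    def schema_status(dbfile):
        # --quiet keeps the utility silent apart from argument errors
        return subprocess.run(["b10-dbutil", "--check", "--quiet",
                               dbfile]).returncode

    if __name__ == "__main__":
        status = schema_status(sys.argv[1])
        messages = {0: "schema is current",
                    1: "schema is old - run b10-dbutil --upgrade",
                    2: "schema is newer than this b10-dbutil"}
        print(messages.get(status, "error while checking the database"))
        sys.exit(status)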
diff --git a/src/bin/dbutil/b10-dbutil.xml b/src/bin/dbutil/b10-dbutil.xml
new file mode 100644
index 0000000..c1c0dee
--- /dev/null
+++ b/src/bin/dbutil/b10-dbutil.xml
@@ -0,0 +1,192 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+ [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+ <refentryinfo>
+ <date>March 20, 2012</date>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>b10-dbutil</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo>BIND10</refmiscinfo>
+ </refmeta>
+
+ <refnamediv>
+ <refname>b10-dbutil</refname>
+ <refpurpose>Zone Database Maintenance Utility</refpurpose>
+ </refnamediv>
+
+ <docinfo>
+ <copyright>
+ <year>2012</year>
+ <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+ </copyright>
+ </docinfo>
+
+ <refsynopsisdiv>
+ <cmdsynopsis>
+ <command>b10-dbutil --check</command>
+ <arg>--verbose</arg>
+ <arg>--quiet</arg>
+ <arg><replaceable choice='req'>dbfile</replaceable></arg>
+ </cmdsynopsis>
+ <cmdsynopsis>
+ <command>b10-dbutil --upgrade</command>
+ <arg>--noconfirm</arg>
+ <arg>--verbose</arg>
+ <arg>--quiet</arg>
+ <arg><replaceable choice='req'>dbfile</replaceable></arg>
+ </cmdsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>DESCRIPTION</title>
+ <para>
+ The <command>b10-dbutil</command> utility is a general administration
+ utility for SQL databases. (Currently only SQLite is supported by
+ BIND 10.) It can report the current version of the schema, and upgrade
+ an existing database to the latest version of the schema.
+ </para>
+
+ <para>
+ <command>b10-dbutil</command> operates in one of two modes, check mode
+ or upgrade mode.
+ </para>
+
+ <para>
+ In check mode (<command>b10-dbutil --check</command>), the
+ utility reads the version of the database schema from the database
+ and prints it. It will tell you whether the schema is at the latest
+ version supported by BIND 10. Exit status is 0 if the schema is at
+ the correct version, 1 if the schema is at an older version, 2 if
+ the schema is at a version not yet supported by this version of
+ b10-dbutil. Any higher value indicates an error during command-line
+ parsing or execution.
+ </para>
+
+ <para>
+ When the upgrade function is selected
+ (<command>b10-dbutil --upgrade</command>), the
+ utility takes a copy of the database, then upgrades it to the latest
+ version of the schema. The contents of the database remain intact.
+ (The backup file is a file in the same directory as the database
+ file. It has the same name, with ".backup" appended to it. If a
+ file of that name already exists, the file will have the suffix
+ ".backup-1". If that exists, the file will be suffixed ".backup-2",
+ and so on). Exit status is 0 if the upgrade is either successful or
+ aborted by the user, and non-zero if there is an error.
+ </para>
+
+ <para>
+ When upgrading the database, it is <emphasis>strongly</emphasis>
+ recommended that BIND 10 not be running while the upgrade is in
+ progress.
+ </para>
+
+ </refsect1>
+
+ <refsect1>
+ <title>ARGUMENTS</title>
+
+ <para>The arguments are as follows:</para>
+
+ <variablelist>
+ <varlistentry>
+ <term>
+ <option>--check</option>
+ </term>
+ <listitem>
+ <para>Selects the version check function, which reports the
+ current version of the database. This is incompatible
+ with the --upgrade option.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--noconfirm</option>
+ </term>
+ <listitem>
+ <para>Only valid with --upgrade, this disables the prompt.
+ Normally the utility will print a warning that an upgrade is
+ about to take place and request that you type "Yes" to continue.
+ If this switch is given on the command line, no prompt will
+ be issued: the utility will just perform the upgrade.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--upgrade</option>
+ </term>
+ <listitem>
+ <para>Selects the upgrade function, which upgrades the database
+ to the latest version of the schema. This is incompatible
+ with the --check option.
+ </para>
+ <para>
+ The upgrade function will upgrade a BIND 10 database - no matter how
+ old the schema - preserving all data. A backup file is created
+ before the upgrade (with the same name as the database, but with
+ ".backup" suffixed to it). If the upgrade fails, this file can
+ be copied back to restore the original database.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--verbose</option>
+ </term>
+ <listitem>
+ <para>Enable verbose mode. Each SQL command issued by the
+ utility will be printed to stderr before it is executed.</para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>--quiet</option>
+ </term>
+ <listitem>
+ <para>Enable quiet mode. No output is printed, except errors during
+ command-line argument parsing, or the user confirmation dialog.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option><replaceable choice='req'>dbfile</replaceable></option>
+ </term>
+ <listitem>
+ <para>
+ Name of the database file to check or upgrade.
+ </para>
+ </listitem>
+ </varlistentry>
+
+
+ </variablelist>
+ </refsect1>
+</refentry>
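Both the manual page and this DocBook source describe the backup naming rule (".backup", then ".backup-1", ".backup-2", and so on). The same rule as a stand-alone sketch, separate from the committed implementation further below:

    import os

    def backup_name(db_file):
        # First candidate is "<db_file>.backup"; if that already exists,
        # try ".backup-1", ".backup-2", ... until an unused name is found.
        candidate = db_file + ".backup"
        count = 0
        while os.path.exists(candidate):
            count += 1
            candidate = db_file + ".backup-" + str(count)
        return candidate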
diff --git a/src/bin/dbutil/dbutil.py.in b/src/bin/dbutil/dbutil.py.in
new file mode 100755
index 0000000..81f351e
--- /dev/null
+++ b/src/bin/dbutil/dbutil.py.in
@@ -0,0 +1,608 @@
+#!@PYTHON@
+
+# Copyright (C) 2012 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+@file Database Utilities
+
+This file holds the "dbutil" program, a general utility program for doing
+management of the BIND 10 database. There are two modes of operation:
+
+ b10-dbutil --check [--verbose] database
+ b10-dbutil --upgrade [--noconfirm] [--verbose] database
+
+The first form checks the version of the given database. The second form
+upgrades the database to the latest version of the schema, omitting the
+warning prompt if --noconfirm is given.
+
+For maximum safety, prior to the upgrade a backup database is created.
+This is the database name with ".backup" appended to it (or ".backup-n" if
+".backup" already exists). This is used to restore the database if the
+upgrade fails.
+"""
+
+# Exit codes
+# These are defined here because one of them is already used before most
+# of the import statements.
+EXIT_SUCCESS = 0
+EXIT_NEED_UPDATE = 1
+EXIT_VERSION_TOO_HIGH = 2
+EXIT_COMMAND_ERROR = 3
+EXIT_READ_ERROR = 4
+EXIT_UPGRADE_ERROR = 5
+EXIT_UNCAUGHT_EXCEPTION = 6
+
+import sys; sys.path.append("@@PYTHONPATH@@")
+
+# Normally, python exits with a status code of 1 on uncaught exceptions
+# Since we reserve exit status 1 for 'database needs upgrade', we
+# override the excepthook to exit with a different status
+def my_except_hook(a, b, c):
+ sys.__excepthook__(a,b,c)
+ sys.exit(EXIT_UNCAUGHT_EXCEPTION)
+sys.excepthook = my_except_hook
+
+import os, sqlite3, shutil
+from optparse import OptionParser
+import isc.util.process
+import isc.log
+from isc.log_messages.dbutil_messages import *
+
+isc.log.init("b10-dbutil")
+logger = isc.log.Logger("dbutil")
+isc.util.process.rename()
+
+TRACE_BASIC = logger.DBGLVL_TRACE_BASIC
+
+
+# @brief Version String
+# This is the version displayed to the user. It comprises the module name,
+# the module version number, and the overall BIND 10 version number (set in
+# configure.ac)
+VERSION = "b10-dbutil 20120319 (BIND 10 @PACKAGE_VERSION@)"
+
+# @brief Statements to Update the Database
+# These are in the form of a list of dictionaries, each of which contains the
+# information to perform an incremental upgrade from one version of the
+# database to the next. The information is:
+#
+# a) from: (major, minor) version that the database is expected to be at
+# to perform this upgrade.
+# b) to: (major, minor) version of the database to which this set of statements
+# upgrades the database. (This is used for documentation purposes,
+# and to update the schema_version table when the upgrade is complete.)
+# c) statements: List of SQL statements to perform the upgrade.
+#
+# The incremental upgrades are performed one after the other. If the version
+# of the database does not exactly match that required for the incremental
+# upgrade, the upgrade is skipped. For this reason, the list must be in
+# ascending order (e.g. upgrade 1.0 to 2.0, 2.0 to 2.1, 2.1 to 2.2 etc.).
+#
+# Note that apart from the 1.0 to 2.0 upgrade, no upgrade need alter the
+# schema_version table: that is done by the upgrade process using the
+# information in the "to" field.
+UPGRADES = [
+ {'from': (1, 0), 'to': (2, 0),
+ 'statements': [
+
+ # Move to the latest "V1" state of the database if not there
+ # already.
+ "CREATE TABLE IF NOT EXISTS diffs (" +
+ "id INTEGER PRIMARY KEY, " +
+ "zone_id INTEGER NOT NULL," +
+ "version INTEGER NOT NULL, " +
+ "operation INTEGER NOT NULL, " +
+ "name STRING NOT NULL COLLATE NOCASE, " +
+ "rrtype STRING NOT NULL COLLATE NOCASE, " +
+ "ttl INTEGER NOT NULL, " +
+ "rdata STRING NOT NULL)",
+
+ # Within SQLite we can only rename tables and add columns; we
+ # can't drop columns nor can we alter column characteristics.
+ # So the strategy is to rename the table, create the new table,
+ # then copy all data across. This means creating new indexes
+ # as well; these are created after the data has been copied.
+
+ # zones table
+ "DROP INDEX zones_byname",
+ "ALTER TABLE zones RENAME TO old_zones",
+ "CREATE TABLE zones (" +
+ "id INTEGER PRIMARY KEY, " +
+ "name TEXT NOT NULL COLLATE NOCASE, " +
+ "rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN', " +
+ "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+ "INSERT INTO ZONES " +
+ "SELECT id, name, rdclass, dnssec FROM old_zones",
+ "CREATE INDEX zones_byname ON zones (name)",
+ "DROP TABLE old_zones",
+
+ # records table
+ "DROP INDEX records_byname",
+ "DROP INDEX records_byrname",
+ "ALTER TABLE records RENAME TO old_records",
+ "CREATE TABLE records (" +
+ "id INTEGER PRIMARY KEY, " +
+ "zone_id INTEGER NOT NULL, " +
+ "name TEXT NOT NULL COLLATE NOCASE, " +
+ "rname TEXT NOT NULL COLLATE NOCASE, " +
+ "ttl INTEGER NOT NULL, " +
+ "rdtype TEXT NOT NULL COLLATE NOCASE, " +
+ "sigtype TEXT COLLATE NOCASE, " +
+ "rdata TEXT NOT NULL)",
+ "INSERT INTO records " +
+ "SELECT id, zone_id, name, rname, ttl, rdtype, sigtype, " +
+ "rdata FROM old_records",
+ "CREATE INDEX records_byname ON records (name)",
+ "CREATE INDEX records_byrname ON records (rname)",
+ "CREATE INDEX records_bytype_and_rname ON records (rdtype, rname)",
+ "DROP TABLE old_records",
+
+ # nsec3 table
+ "DROP INDEX nsec3_byhash",
+ "ALTER TABLE nsec3 RENAME TO old_nsec3",
+ "CREATE TABLE nsec3 (" +
+ "id INTEGER PRIMARY KEY, " +
+ "zone_id INTEGER NOT NULL, " +
+ "hash TEXT NOT NULL COLLATE NOCASE, " +
+ "owner TEXT NOT NULL COLLATE NOCASE, " +
+ "ttl INTEGER NOT NULL, " +
+ "rdtype TEXT NOT NULL COLLATE NOCASE, " +
+ "rdata TEXT NOT NULL)",
+ "INSERT INTO nsec3 " +
+ "SELECT id, zone_id, hash, owner, ttl, rdtype, rdata " +
+ "FROM old_nsec3",
+ "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+ "DROP TABLE old_nsec3",
+
+ # diffs table
+ "ALTER TABLE diffs RENAME TO old_diffs",
+ "CREATE TABLE diffs (" +
+ "id INTEGER PRIMARY KEY, " +
+ "zone_id INTEGER NOT NULL, " +
+ "version INTEGER NOT NULL, " +
+ "operation INTEGER NOT NULL, " +
+ "name TEXT NOT NULL COLLATE NOCASE, " +
+ "rrtype TEXT NOT NULL COLLATE NOCASE, " +
+ "ttl INTEGER NOT NULL, " +
+ "rdata TEXT NOT NULL)",
+ "INSERT INTO diffs " +
+ "SELECT id, zone_id, version, operation, name, rrtype, " +
+ "ttl, rdata FROM old_diffs",
+ "DROP TABLE old_diffs",
+
+ # Schema table. This is updated to include a second column for
+ # future changes. The idea is that if a version of BIND 10 is
+ # written for schema M.N, it should be able to work with any
+ # minor version N of major version M; if not, M must be incremented.
+ #
+ # For backwards compatibility, the column holding the major
+ # version number is left named "version".
+ "ALTER TABLE schema_version " +
+ "ADD COLUMN minor INTEGER NOT NULL DEFAULT 0"
+ ]
+ }
+
+# To extend this, leave the above statements in place and add another
+# dictionary to the list. The "from" version should be (2, 0), the "to"
+# version whatever the version the update is to, and the SQL statements are
+# the statements required to perform the upgrade. This way, the upgrade
+# program will be able to upgrade both a V1.0 and a V2.0 database.
+]
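As the comment inside the list notes, a later schema change would be expressed by appending another dictionary whose 'from' matches the previous entry's 'to'. A purely hypothetical 2.0-to-2.1 increment (the SQL below is illustrative only and not part of this commit) might look like:

    # Hypothetical follow-on increment, for illustration only.
    {'from': (2, 0), 'to': (2, 1),
     'statements': [
         "CREATE INDEX IF NOT EXISTS diffs_byzone ON diffs (zone_id)"
     ]
    }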
+
+class DbutilException(Exception):
+ """
+ @brief Exception class to indicate error exit
+ """
+ pass
+
+class Database:
+ """
+ @brief Database Encapsulation
+
+ Encapsulates the SQL database, both the connection and the cursor. The
+ methods will cause a program exit on any error.
+ """
+ def __init__(self, db_file):
+ """
+ @brief Constructor
+
+ @param db_file Name of the database file
+ """
+ self.connection = None
+ self.cursor = None
+ self.db_file = db_file
+ self.backup_file = None
+
+ def open(self):
+ """
+ @brief Open Database
+
+ Opens the passed file as an sqlite3 database and stores a connection
+ and a cursor.
+ """
+ if not os.path.exists(self.db_file):
+ raise DbutilException("database " + self.db_file +
+ " does not exist");
+
+ try:
+ self.connection = sqlite3.connect(self.db_file)
+ self.connection.isolation_level = None # set autocommit
+ self.cursor = self.connection.cursor()
+ except sqlite3.OperationalError as ex:
+ raise DbutilException("unable to open " + self.db_file +
+ " - " + str(ex))
+
+ def close(self):
+ """
+ @brief Closes the database
+ """
+ if self.connection is not None:
+ self.connection.close()
+
+ def execute(self, statement):
+ """
+ @brief Execute Statement
+
+ Executes the given statement, exiting the program on error.
+
+ @param statement SQL statement to execute
+ """
+ logger.debug(TRACE_BASIC, DBUTIL_EXECUTE, statement)
+
+ try:
+ self.cursor.execute(statement)
+ except Exception as ex:
+ logger.error(DBUTIL_STATEMENT_ERROR, statement, ex)
+ raise DbutilException(str(ex))
+
+ def result(self):
+ """
+ @brief Return result of last execute
+
+ Returns a single row that is the result of the last "execute".
+ """
+ return self.cursor.fetchone()
+
+ def backup(self):
+ """
+ @brief Backup Database
+
+ Attempts to copy the given database file to a backup database, the
+ backup database file being the file name with ".backup" appended.
+ If the ".backup" file exists, a new name is constructed by appending
+ ".backup-n" (n starting at 1) and the action repeated until an
+ unused filename is found.
+
+ @param db_file Database file to backup
+ """
+ if not os.path.exists(self.db_file):
+ raise DbutilException("database " + self.db_file +
+ " does not exist");
+
+ self.backup_file = self.db_file + ".backup"
+ count = 0
+ while os.path.exists(self.backup_file):
+ count = count + 1
+ self.backup_file = self.db_file + ".backup-" + str(count)
+
+ # Do the backup
+ shutil.copyfile(self.db_file, self.backup_file)
+ logger.info(DBUTIL_BACKUP, self.db_file, self.backup_file)
+
+def prompt_user():
+ """
+ @brief Prompt the User
+
+ Explains about the upgrade and requests authorisation to continue.
+
+ @return True if user entered 'Yes', False if 'No'
+ """
+ sys.stdout.write(
+"""You have selected the upgrade option. This will upgrade the schema of the
+selected BIND 10 zone database to the latest version.
+
+The utility will take a copy of the zone database file before executing so, in
+the event of a problem, you will be able to restore the zone database from
+the backup. To ensure the integrity of this backup, please ensure that
+BIND 10 is not running before continuing.
+""")
+ yes_entered = False
+ no_entered = False
+ while (not yes_entered) and (not no_entered):
+ sys.stdout.write("Enter 'Yes' to proceed with the upgrade, " +
+ "'No' to exit the program: \n")
+ response = sys.stdin.readline()
+ if response.lower() == "yes\n":
+ yes_entered = True
+ elif response.lower() == "no\n":
+ no_entered = True
+ else:
+ sys.stdout.write("Please enter 'Yes' or 'No'\n")
+
+ return yes_entered
+
+
+def version_string(version):
+ """
+ @brief Format Database Version
+
+ Converts a (major, minor) tuple into a 'Vn.m' string.
+
+ @param version Version tuple to convert
+
+ @return Version string
+ """
+ return "V" + str(version[0]) + "." + str(version[1])
+
+
+def compare_versions(first, second):
+ """
+ @brief Compare Versions
+
+ Compares two database version numbers.
+
+ @param first First version number to check (in the form of a
+ "(major, minor)" tuple).
+ @param second Second version number to check (in the form of a
+ "(major, minor)" tuple).
+
+ @return -1, 0, +1 if "first" is <, ==, > "second"
+ """
+ if first == second:
+ return 0
+
+ elif ((first[0] < second[0]) or
+ ((first[0] == second[0]) and (first[1] < second[1]))):
+ return -1
+
+ else:
+ return 1
+
+
+def get_latest_version():
+ """
+ @brief Returns the version to which this utility can upgrade the database
+
+ This is the 'to' version held in the last element of the upgrades list
+ """
+ return UPGRADES[-1]['to']
+
+
+def get_version(db):
+ """
+ @brief Return version of database
+
+ @return Version of database in form (major version, minor version)
+ """
+
+ # Get the version information.
+ db.execute("SELECT * FROM schema_version")
+ result = db.result()
+ if result is None:
+ raise DbutilException("nothing in schema_version table")
+
+ major = result[0]
+ if (major == 1):
+ # If the version number is 1, there will be no "minor" column, so
+ # assume a minor version number of 0.
+ minor = 0
+ else:
+ minor = result[1]
+
+ result = db.result()
+ if result is not None:
+ raise DbutilException("too many rows in schema_version table")
+
+ return (major, minor)
+
+
+def check_version(db):
+ """
+ @brief Check the version
+
+ Checks the version of the database and the latest version, and advises if
+ an upgrade is needed.
+
+ @param db Database object
+
+ returns 0 if the database is up to date
+ returns EXIT_NEED_UPDATE if the database needs updating
+ returns EXIT_VERSION_TOO_HIGH if the database is at a later version
+ than this program knows about
+ These return values are intended to be passed on to sys.exit.
+ """
+ current = get_version(db)
+ latest = get_latest_version()
+
+ match = compare_versions(current, latest)
+ if match == 0:
+ logger.info(DBUTIL_VERSION_CURRENT, version_string(current))
+ logger.info(DBUTIL_CHECK_OK)
+ return EXIT_SUCCESS
+ elif match < 0:
+ logger.info(DBUTIL_VERSION_LOW, version_string(current),
+ version_string(latest))
+ logger.info(DBUTIL_CHECK_UPGRADE_NEEDED)
+ return EXIT_NEED_UPDATE
+ else:
+ logger.warn(DBUTIL_VERSION_HIGH, version_string(current),
+ version_string(get_latest_version()))
+ logger.info(DBUTIL_UPGRADE_DBUTIL)
+ return EXIT_VERSION_TOO_HIGH
+
+def perform_upgrade(db, upgrade):
+ """
+ @brief Perform upgrade
+
+ Performs the upgrade. At the end of the upgrade, updates the schema_version
+ table with the expected version.
+
+ @param db Database object
+ @param upgrade Upgrade dictionary, holding "from", "to" and "statements".
+ """
+ logger.info(DBUTIL_UPGRADING, version_string(upgrade['from']),
+ version_string(upgrade['to']))
+ for statement in upgrade['statements']:
+ db.execute(statement)
+
+ # Update the version information
+ db.execute("DELETE FROM schema_version")
+ db.execute("INSERT INTO schema_version VALUES (" +
+ str(upgrade['to'][0]) + "," + str(upgrade['to'][1]) + ")")
+
+
+def perform_all_upgrades(db):
+ """
+ @brief Performs all the upgrades
+
+ @param db Database object
+
+ For each upgrade, checks that the database is at the expected version.
+ If so, calls perform_upgrade to update the database.
+ """
+ match = compare_versions(get_version(db), get_latest_version())
+ if match == 0:
+ logger.info(DBUTIL_UPGRADE_NOT_NEEDED)
+
+ elif match > 0:
+ logger.warn(DBUTIL_UPGRADE_NOT_POSSIBLE)
+
+ else:
+ # Work our way through all upgrade increments
+ count = 0
+ for upgrade in UPGRADES:
+ if compare_versions(get_version(db), upgrade['from']) == 0:
+ perform_upgrade(db, upgrade)
+ count = count + 1
+
+ if count > 0:
+ logger.info(DBUTIL_UPGRADE_SUCCESFUL)
+ else:
+ # Should not get here, as we established earlier that the database
+ # was not at the latest version so we should have upgraded.
+ raise DbutilException("internal error in upgrade tool - no " +
+ "upgrade was performed on an old version " +
+ "of the database")
+
+
+def parse_command():
+ """
+ @brief Parse Command
+
+ Parses the command line and sets the global command options.
+
+ @return Tuple of parser options and parser arguments
+ """
+ usage = ("usage: %prog --check [options] db_file\n" +
+ " %prog --upgrade [--noconfirm] [options] db_file")
+ parser = OptionParser(usage = usage, version = VERSION)
+ parser.add_option("-c", "--check", action="store_true",
+ dest="check", default=False,
+ help="Print database version and check if it " +
+ "needs upgrading")
+ parser.add_option("-n", "--noconfirm", action="store_true",
+ dest="noconfirm", default=False,
+ help="Do not prompt for confirmation before upgrading")
+ parser.add_option("-u", "--upgrade", action="store_true",
+ dest="upgrade", default=False,
+ help="Upgrade the database file to the latest version")
+ parser.add_option("-v", "--verbose", action="store_true",
+ dest="verbose", default=False,
+ help="Print SQL statements as they are executed")
+ parser.add_option("-q", "--quiet", action="store_true",
+ dest="quiet", default=False,
+ help="Don't print any info, warnings or errors")
+ (options, args) = parser.parse_args()
+
+ # Set the database file on which to operate
+ if (len(args) > 1):
+ logger.error(DBUTIL_TOO_MANY_ARGUMENTS)
+ parser.print_usage()
+ sys.exit(EXIT_COMMAND_ERROR)
+ elif len(args) == 0:
+ logger.error(DBUTIL_NO_FILE)
+ parser.print_usage()
+ sys.exit(EXIT_COMMAND_ERROR)
+
+ # Check for conflicting options. If some are found, output a suitable
+ # error message and print the usage.
+ if options.check and options.upgrade:
+ logger.error(DBUTIL_COMMAND_UPGRADE_CHECK)
+ elif (not options.check) and (not options.upgrade):
+ logger.error(DBUTIL_COMMAND_NONE)
+ elif (options.check and options.noconfirm):
+ logger.error(DBUTIL_CHECK_NOCONFIRM)
+ else:
+ return (options, args)
+
+ # Only get here on conflicting options
+ parser.print_usage()
+ sys.exit(EXIT_COMMAND_ERROR)
+
+
+if __name__ == "__main__":
+ (options, args) = parse_command()
+
+ if options.verbose:
+ isc.log.init("b10-dbutil", "DEBUG", 99)
+ logger = isc.log.Logger("dbutil")
+ elif options.quiet:
+ # We don't use FATAL, so setting the logger to use
+ # it should essentially make it silent.
+ isc.log.init("b10-dbutil", "FATAL")
+ logger = isc.log.Logger("dbutil")
+
+ db = Database(args[0])
+ exit_code = EXIT_SUCCESS
+
+ logger.info(DBUTIL_FILE, args[0])
+ if options.check:
+ # Check database - open, report, and close
+ try:
+ db.open()
+ exit_code = check_version(db)
+ db.close()
+ except Exception as ex:
+ logger.error(DBUTIL_CHECK_ERROR, ex)
+ exit_code = EXIT_READ_ERROR
+
+ elif options.upgrade:
+ # Upgrade. Check if this is what they really want to do
+ if not options.noconfirm:
+ proceed = prompt_user()
+ if not proceed:
+ logger.info(DBUTIL_UPGRADE_CANCELED)
+ sys.exit(EXIT_SUCCESS)
+
+ # It is. Do a backup then do the upgrade.
+ in_progress = False
+ try:
+ db.backup()
+ db.open()
+ in_progress = True
+ perform_all_upgrades(db)
+ db.close()
+ except Exception as ex:
+ if in_progress:
+ logger.error(DBUTIL_UPGRADE_FAILED, ex)
+ logger.warn(DBUTIL_DATABASE_MAY_BE_CORRUPT, db.db_file,
+ db.backup_file)
+ else:
+ logger.error(DBUTIL_UPGRADE_PREPARATION_FAILED, ex)
+ logger.info(DBUTIL_UPGRADE_NOT_ATTEMPTED)
+ exit_code = EXIT_UPGRADE_ERROR
+
+ sys.exit(exit_code)
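The upgrade path above is a walk along UPGRADES: each increment whose 'from' version equals the current schema version is applied, and the current version advances to that increment's 'to'. A stripped-down sketch of that control flow, using plain tuples instead of the Database object:

    def plan_upgrades(current, upgrades):
        """Return the increments that would be applied, in order.

        current  -- (major, minor) tuple read from schema_version
        upgrades -- list of {'from': ..., 'to': ..., 'statements': [...]}
        """
        plan = []
        for step in upgrades:
            # Apply only increments that start exactly at the current version
            if current == step['from']:
                plan.append(step)
                current = step['to']
        return plan

    # A database at (1, 0), with the UPGRADES list defined above, yields a
    # single step ending at (2, 0); a database already at (2, 0) yields none.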
diff --git a/src/bin/dbutil/dbutil_messages.mes b/src/bin/dbutil/dbutil_messages.mes
new file mode 100644
index 0000000..90ede92
--- /dev/null
+++ b/src/bin/dbutil/dbutil_messages.mes
@@ -0,0 +1,114 @@
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the ddns messages python module.
+
+# When you add a message to this file, it is a good idea to run
+# <topsrcdir>/tools/reorder_message_file.py to make sure the
+# messages are in the correct order.
+
+% DBUTIL_BACKUP created backup of %1 in %2
+A backup for the given database file was created. The names of the original
+file and of the backup are given in the output message.
+
+% DBUTIL_CHECK_ERROR unable to check database version: %1
+There was an error while trying to check the current version of the database
+schema. The error is shown in the message.
+
+% DBUTIL_CHECK_NOCONFIRM --noconfirm is not compatible with --check
+b10-dbutil was called with --check and --noconfirm. --noconfirm only has
+meaning with --upgrade, so this is considered an error.
+
+% DBUTIL_CHECK_OK this is the latest version of the database schema. No upgrade is required
+The database schema version has been checked, and is up to date.
+No action is required.
+
+% DBUTIL_CHECK_UPGRADE_NEEDED re-run this program with the --upgrade switch to upgrade
+The database schema version is not up to date, and an update is required.
+Please run the dbutil tool again, with the --upgrade argument.
+
+% DBUTIL_COMMAND_NONE must select one of --check or --upgrade
+b10-dbutil was called with neither --check nor --upgrade. One action must be
+provided.
+
+% DBUTIL_COMMAND_UPGRADE_CHECK --upgrade is not compatible with --check
+b10-dbutil was called with both the commands --upgrade and --check. Only one
+action can be performed at a time.
+
+% DBUTIL_DATABASE_MAY_BE_CORRUPT database file %1 may be corrupt, restore it from backup (%2)
+The upgrade failed while it was in progress; the database may now be in an
+inconsistent state, and it is advised to restore it from the backup that was
+created when b10-dbutil started.
+
+% DBUTIL_EXECUTE Executing SQL statement: %1
+A debug message; the given SQL statement is being executed.
+
+% DBUTIL_FILE Database file: %1
+The database file that is being checked.
+
+% DBUTIL_NO_FILE must supply name of the database file to upgrade
+b10-dbutil was called without a database file. Currently, it cannot find this
+file on its own, and it must be provided.
+
+% DBUTIL_STATEMENT_ERROR failed to execute %1: %2
+The given database statement failed to execute. The error is shown in the
+message.
+
+% DBUTIL_TOO_MANY_ARGUMENTS too many arguments to the command, maximum of one expected
+There were too many command-line arguments to b10-dbutil
+
+% DBUTIL_UPGRADE_CANCELED upgrade canceled; database has not been changed
+The user aborted the upgrade, and b10-dbutil will now exit.
+
+% DBUTIL_UPGRADE_DBUTIL please get the latest version of b10-dbutil and re-run
+A database schema was found that is newer than this version of b10-dbutil
+can handle. The utility itself is apparently out of date and should be upgraded.
+
+% DBUTIL_UPGRADE_FAILED upgrade failed: %1
+While the upgrade was in progress, an unexpected error occurred. The error
+is shown in the message.
+
+% DBUTIL_UPGRADE_NOT_ATTEMPTED database upgrade was not attempted
+Due to the earlier failure, the database schema upgrade was not attempted,
+and b10-dbutil will now exit.
+
+% DBUTIL_UPGRADE_NOT_NEEDED database already at latest version, no upgrade necessary
+b10-dbutil was told to upgrade the database schema, but it is already at the
+latest version.
+
+% DBUTIL_UPGRADE_NOT_POSSIBLE database at a later version than this utility can support
+b10-dbutil was told to upgrade the database schema, but it is at a higher
+version than this tool currently supports. Please update b10-dbutil and try
+again.
+
+% DBUTIL_UPGRADE_PREPARATION_FAILED upgrade preparation failed: %1
+An unexpected error occurred while b10-dbutil was preparing to upgrade the
+database schema. The error is shown in the message.
+
+% DBUTIL_UPGRADE_SUCCESFUL database upgrade successfully completed
+The database schema update was completed successfully.
+
+% DBUTIL_UPGRADING upgrading database from %1 to %2
+An upgrade is in progress; the 'from' and 'to' versions of the current upgrade step are shown.
+
+% DBUTIL_VERSION_CURRENT database version %1
+The current version of the database schema.
+
+% DBUTIL_VERSION_HIGH database is at a later version (%1) than this program can cope with (%2)
+The database schema is at a higher version than b10-dbutil knows about.
+
+% DBUTIL_VERSION_LOW database version %1, latest version is %2.
+The database schema is not up to date; the current version and the latest
+version are given in the message.
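The header of this file asks that messages be kept in the correct order (tools/reorder_message_file.py is mentioned for that). A rough, hypothetical ordering check - not the actual tool - could be as small as:

    import sys

    def message_ids(path):
        # Message definitions start with "% MSGID text..."; collect the IDs.
        with open(path) as f:
            return [line.split()[1] for line in f if line.startswith("% ")]

    if __name__ == "__main__":
        ids = message_ids(sys.argv[1])
        sys.exit(0 if ids == sorted(ids) else 1)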
diff --git a/src/bin/dbutil/run_dbutil.sh.in b/src/bin/dbutil/run_dbutil.sh.in
new file mode 100755
index 0000000..fea7482
--- /dev/null
+++ b/src/bin/dbutil/run_dbutil.sh.in
@@ -0,0 +1,40 @@
+#! /bin/sh
+
+# Copyright (C) 2010 Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
+export PYTHON_EXEC
+
+DBUTIL_PATH=@abs_top_builddir@/src/bin/dbutil
+
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+export PYTHONPATH
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+ @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+ export @ENV_LIBRARY_PATH@
+fi
+
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+
+BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
+export BIND10_MSGQ_SOCKET_FILE
+
+exec ${PYTHON_EXEC} -O ${DBUTIL_PATH}/b10-dbutil "$@"
diff --git a/src/bin/dbutil/tests/.gitignore b/src/bin/dbutil/tests/.gitignore
new file mode 100644
index 0000000..8248611
--- /dev/null
+++ b/src/bin/dbutil/tests/.gitignore
@@ -0,0 +1,2 @@
+/dbutil_test.sh
+/dbutil_test_verfile_*
diff --git a/src/bin/dbutil/tests/Makefile.am b/src/bin/dbutil/tests/Makefile.am
new file mode 100644
index 0000000..c03b262
--- /dev/null
+++ b/src/bin/dbutil/tests/Makefile.am
@@ -0,0 +1,6 @@
+SUBDIRS = . testdata
+
+# Tests of the update script.
+
+check-local:
+ $(SHELL) $(abs_builddir)/dbutil_test.sh
diff --git a/src/bin/dbutil/tests/dbutil_test.sh.in b/src/bin/dbutil/tests/dbutil_test.sh.in
new file mode 100755
index 0000000..f82eeb0
--- /dev/null
+++ b/src/bin/dbutil/tests/dbutil_test.sh.in
@@ -0,0 +1,481 @@
+#!/bin/sh
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Checks the --check and --upgrade functions of b10-dbutil against a set of
+# test databases at various schema versions.
+
+testname="Database Upgrade Test"
+echo $testname
+
+failcount=0
+tempfile=@abs_builddir@/dbutil_test_tempfile_$$
+backupfile=${tempfile}.backup
+testdata=@abs_srcdir@/testdata
+verfile=@abs_builddir@/dbutil_test_verfile_$$
+
+# @brief Record a success
+succeed() {
+ echo "--- PASS"
+}
+
+
+# @brief Record a fail
+#
+# @param $1 Optional additional reason to output
+fail() {
+ if [ "$1" != "" ]
+ then
+ echo "ERROR: $1"
+ fi
+ echo "*** FAIL"
+ failcount=`expr $failcount + 1`
+}
+
+
+# @brief Record a pass if the argument is zero
+#
+# @param $1 Value to test
+passzero() {
+ if [ $1 -eq 0 ]; then
+ succeed
+ else
+ fail
+ fi
+}
+
+
+# @brief Record a fail if the argument is non-zero
+#
+# @param $1 Value to test
+failzero() {
+ if [ $1 -ne 0 ]; then
+ succeed
+ else
+ fail
+ fi
+}
+
+
+# @brief Copy File
+#
+# Executes a "cp" operation followed by a "chmod" to make the target writeable.
+#
+# @param $1 Source file
+# @param $2 Target file
+copy_file () {
+ cp $1 $2
+ chmod a+w $2
+}
+
+
+
+# @brief Check backup file
+#
+# Record a failure if the backup file does not exist or if it is different
+# to the data file. (N.B. No success is recorded if they are the same.)
+#
+# @param $1 Source database file
+# @param $2 Backup file
+check_backup() {
+ if [ ! -e $1 ]
+ then
+ fail "database file $1 not found"
+
+ elif [ ! -e $2 ]
+ then
+ fail "backup file $2 not found"
+
+ else
+ diff $1 $2 > /dev/null
+ if [ $? -ne 0 ]
+ then
+ fail "database file $1 different to backup file $2"
+ fi
+ fi
+}
+
+
+# @brief Check No Backup File
+#
+# Record a failure if the backup file exists. (N.B. No success is recorded if
+# it does not.)
+#
+# @param $1 Source database file (unused, present for symmetry)
+# @param $2 Backup file
+check_no_backup() {
+ if [ -e $2 ]
+ then
+ fail "backup of database $2 exists when it should not"
+ fi
+}
+
+
+# @brief Get Database Schema
+#
+# As the schema stored in the database is format-dependent - how it is printed
+# depends on how the commands were entered (on one line, split across two
+# lines etc.) - comparing schema is awkward.
+#
+# The function sets the local variable db_schema to the output of the
+# .schema command, with spaces removed and upper converted to lowercase.
+#
+# The database is copied before the schema is taken (and removed after)
+# as SQLite3 assumes a writeable database, which may not be the case if
+# getting the schema from a reference copy.
+#
+# @param $1 Database for which the schema is required
+get_schema() {
+ db1=@abs_builddir@/dbutil_test_schema_$$
+ copy_file $1 $db1
+
+ db_schema=`sqlite3 $db1 '.schema' | \
+ awk '{line = line $0} END {print line}' | \
+ sed -e 's/ //g' | \
+ tr [:upper:] [:lower:]`
+ rm -f $db1
+}
+
+
+# @brief Successful Schema Upgrade Test
+#
+# This test is done where the upgrade is expected to be successful - when
+# the end result of the test is that the test database is upgraded to a
+# database of the expected schema.
+#
+# Note: the caller must ensure that $tempfile and $backupfile do not exist
+# on entry, and is responsible for removing them afterwards.
+#
+# @param $1 Database to upgrade
+# @param $2 Expected backup file
+upgrade_ok_test() {
+ copy_file $1 $tempfile
+ ../run_dbutil.sh --upgrade --noconfirm $tempfile
+ if [ $? -eq 0 ]
+ then
+ # Compare schema with the reference
+ get_schema $testdata/v2_0.sqlite3
+ expected_schema=$db_schema
+ get_schema $tempfile
+ actual_schema=$db_schema
+ if [ "$expected_schema" = "$actual_schema" ]
+ then
+ succeed
+ else
+ fail "upgraded schema not as expected"
+ fi
+
+ # Check the version is set correctly
+ check_version $tempfile "V2.0"
+
+ # Check that a backup was made
+ check_backup $1 $2
+ else
+ # Error should have been output already
+ fail
+ fi
+}
+
+
+# @brief Unsuccessful Upgrade Test
+#
+# Checks that an upgrade of the specified database fails.
+#
+# Note: the caller must ensure that $tempfile and $backupfile do not exist
+# on entry, and is responsible for removing them afterwards.
+#
+# @param $1 Database to upgrade
+# @param $2 Expected backup file
+upgrade_fail_test() {
+ copy_file $1 $tempfile
+ ../run_dbutil.sh --upgrade --noconfirm $tempfile
+ failzero $?
+ check_backup $1 $backupfile
+}
+
+
+# @brief Record Count Test
+#
+# Checks that the count of records in each table is preserved in the upgrade.
+#
+# Note 1: This test assumes that the "diffs" table is present.
+# Note 2: The caller must ensure that $tempfile and $backupfile do not exist
+# on entry, and is responsible for removing them afterwards.
+#
+# @param $1 Database to upgrade
+record_count_test() {
+ copy_file $1 $tempfile
+
+ diffs_count=`sqlite3 $tempfile 'select count(*) from diffs'`
+ nsec3_count=`sqlite3 $tempfile 'select count(*) from nsec3'`
+ records_count=`sqlite3 $tempfile 'select count(*) from records'`
+ zones_count=`sqlite3 $tempfile 'select count(*) from zones'`
+
+ ../run_dbutil.sh --upgrade --noconfirm $tempfile
+ if [ $? -ne 0 ]
+ then
+ # Reason for failure should already have been output
+ fail
+ else
+ new_diffs_count=`sqlite3 $tempfile 'select count(*) from diffs'`
+ new_nsec3_count=`sqlite3 $tempfile 'select count(*) from nsec3'`
+ new_records_count=`sqlite3 $tempfile 'select count(*) from records'`
+ new_zones_count=`sqlite3 $tempfile 'select count(*) from zones'`
+
+ if [ $diffs_count -ne $new_diffs_count ]
+ then
+ fail "diffs table was not completely copied"
+ fi
+
+ if [ $nsec3_count -ne $new_nsec3_count ]
+ then
+ fail "nsec3 table was not completely copied"
+ fi
+
+ if [ $records_count -ne $new_records_count ]
+ then
+ fail "records table was not completely copied"
+ fi
+
+ if [ $zones_count -ne $new_zones_count ]
+ then
+ fail "zones table was not completely copied"
+ fi
+
+ # As an extra check, test that the backup was successful
+ check_backup $1 $backupfile
+ fi
+}
+
+
+# @brief Version Check
+#
+# Checks that the database is at the specified version (and so checks the
+# --check function). On success, a pass is recorded.
+#
+# @param $1 Database to check
+# @param $2 Expected version string
+check_version() {
+ copy_file $1 $verfile
+ ../run_dbutil.sh --check $verfile
+ status=$?
+ if [ $status -gt 2 ]
+ then
+ fail "version check failed on database $1; return code $status"
+ else
+ ../run_dbutil.sh --check $verfile 2>&1 | grep "$2" > /dev/null
+ if [ $? -ne 0 ]
+ then
+ fail "database $1 not at expected version $2"
+ else
+ succeed
+ fi
+ fi
+ rm -f $verfile
+}
+
+
+# @brief Version Check Fail
+#
+# Does a version check but expects the check to fail.
+#
+# @param $1 Database to check
+# @param $2 Backup file
+check_version_fail() {
+ copy_file $1 $verfile
+ ../run_dbutil.sh --check $verfile
+ failzero $?
+ check_no_backup $tempfile $backupfile
+}
+
+
+# Main test sequence
+
+rm -f $tempfile $backupfile
+
+# Test 1 - check that the utility fails if the database does not exist
+echo "1.1. Non-existent database - check"
+../run_dbutil.sh --check $tempfile
+failzero $?
+check_no_backup $tempfile $backupfile
+
+echo "1.2. Non-existent database - upgrade"
+../run_dbutil.sh --upgrade --noconfirm $tempfile
+failzero $?
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+
+# Test 2 - should fail to check an empty file and fail to upgrade it
+echo "2.1. Database is an empty file - check"
+touch $tempfile
+check_version_fail $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo "2.2. Database is an empty file - upgrade"
+touch $tempfile
+../run_dbutil.sh --upgrade --noconfirm $tempfile
+failzero $?
+# A backup is performed before anything else, so the backup should exist.
+check_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "3.1. Database is not an SQLite file - check"
+echo "This is not an sqlite3 database" > $tempfile
+check_version_fail $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo "3.2. Database is not an SQLite file - upgrade"
+echo "This is not an sqlite3 database" > $tempfile
+../run_dbutil.sh --upgrade --noconfirm $tempfile
+failzero $?
+# ...and as before, a backup should have been created
+check_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "4.1. Database is an SQLite3 file without the schema table - check"
+check_version_fail $testdata/no_schema.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+echo "4.1. Database is an SQLite3 file without the schema table - upgrade"
+upgrade_fail_test $testdata/no_schema.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "5.1. Database is an old V1 database - check"
+check_version $testdata/old_v1.sqlite3 "V1.0"
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo "5.2. Database is an old V1 database - upgrade"
+upgrade_ok_test $testdata/old_v1.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "6.1. Database is new V1 database - check"
+check_version $testdata/new_v1.sqlite3 "V1.0"
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo "6.2. Database is a new V1 database - upgrade"
+upgrade_ok_test $testdata/new_v1.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "7.1. Database is V2.0 database - check"
+check_version $testdata/v2_0.sqlite3 "V2.0"
+check_no_backup $tempfile $backupfile
+rm -f $tempfile $backupfile
+
+echo "7.2. Database is a V2.0 database - upgrade"
+upgrade_ok_test $testdata/v2_0.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "8.1. Database is V2.0 database with empty schema table - check"
+check_version_fail $testdata/empty_version.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+echo "8.2. Database is V2.0 database with empty schema table - upgrade"
+upgrade_fail_test $testdata/empty_version.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "9.1. Database is V2.0 database with over-full schema table - check"
+check_version_fail $testdata/too_many_version.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+echo "9.2. Database is V2.0 database with over-full schema table - upgrade"
+upgrade_fail_test $testdata/too_many_version.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "10.0. Upgrade corrupt database"
+upgrade_fail_test $testdata/corrupt.sqlite3 $backupfile
+rm -f $tempfile $backupfile
+
+
+echo "11. Record count test"
+record_count_test $testdata/new_v1.sqlite3
+rm -f $tempfile $backupfile
+
+
+echo "12. Backup file already exists"
+touch $backupfile
+touch ${backupfile}-1
+upgrade_ok_test $testdata/v2_0.sqlite3 ${backupfile}-2
+rm -f $tempfile $backupfile ${backupfile}-1 ${backupfile}-2
+
+
+echo "13.1 Command-line errors"
+copy_file $testdata/old_v1.sqlite3 $tempfile
+../run_dbutil.sh $tempfile
+failzero $?
+../run_dbutil.sh --upgrade --check $tempfile
+failzero $?
+../run_dbutil.sh --noconfirm --check $tempfile
+failzero $?
+../run_dbutil.sh --check
+failzero $?
+../run_dbutil.sh --upgrade --noconfirm
+failzero $?
+../run_dbutil.sh --check $tempfile $backupfile
+failzero $?
+../run_dbutil.sh --upgrade --noconfirm $tempfile $backupfile
+failzero $?
+rm -f $tempfile $backupfile
+
+echo "13.2 verbose flag"
+copy_file $testdata/old_v1.sqlite3 $tempfile
+../run_dbutil.sh --upgrade --noconfirm --verbose $tempfile
+passzero $?
+rm -f $tempfile $backupfile
+
+echo "13.3 Interactive prompt - yes"
+copy_file $testdata/old_v1.sqlite3 $tempfile
+../run_dbutil.sh --upgrade $tempfile << .
+Yes
+.
+passzero $?
+check_version $tempfile "V2.0"
+rm -f $tempfile $backupfile
+
+echo "13.4 Interactive prompt - no"
+copy_file $testdata/old_v1.sqlite3 $tempfile
+../run_dbutil.sh --upgrade $tempfile << .
+no
+.
+passzero $?
+diff $testdata/old_v1.sqlite3 $tempfile > /dev/null
+passzero $?
+rm -f $tempfile $backupfile
+
+echo "13.5 quiet flag"
+copy_file $testdata/old_v1.sqlite3 $tempfile
+../run_dbutil.sh --check --quiet $tempfile 2>&1 | grep .
+failzero $?
+rm -f $tempfile $backupfile
+
+# Report the result
+if [ $failcount -eq 0 ]; then
+ echo "PASS: $testname"
+elif [ $failcount -eq 1 ]; then
+ echo "FAIL: $testname - 1 test failed"
+else
+ echo "FAIL: $testname - $failcount tests failed"
+fi
+
+# Exit with appropriate error status
+exit $failcount
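The get_schema helper above compares schemas by flattening the .schema output, stripping spaces and lowercasing. The same normalization written against Python's sqlite3 module rather than the command-line shell - a sketch, not part of the test suite:

    import sqlite3

    def normalized_schema(db_file):
        # Concatenate all CREATE statements, then drop spaces and lowercase,
        # mirroring the awk/sed/tr pipeline in get_schema above.
        conn = sqlite3.connect(db_file)
        try:
            rows = conn.execute(
                "SELECT sql FROM sqlite_master WHERE sql IS NOT NULL")
            schema = "".join(sql for (sql,) in rows)
        finally:
            conn.close()
        return schema.replace(" ", "").lower()

    def same_schema(db1, db2):
        return normalized_schema(db1) == normalized_schema(db2)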
diff --git a/src/bin/dbutil/tests/testdata/Makefile.am b/src/bin/dbutil/tests/testdata/Makefile.am
new file mode 100644
index 0000000..0d850a7
--- /dev/null
+++ b/src/bin/dbutil/tests/testdata/Makefile.am
@@ -0,0 +1,12 @@
+EXTRA_DIST =
+EXTRA_DIST += corrupt.sqlite3
+EXTRA_DIST += empty_schema.sqlite3
+EXTRA_DIST += empty_v1.sqlite3
+EXTRA_DIST += empty_version.sqlite3
+EXTRA_DIST += invalid_v1.sqlite3
+EXTRA_DIST += new_v1.sqlite3
+EXTRA_DIST += no_schema.sqlite3
+EXTRA_DIST += old_v1.sqlite3
+EXTRA_DIST += README
+EXTRA_DIST += too_many_version.sqlite3
+EXTRA_DIST += v2_0.sqlite3
diff --git a/src/bin/dbutil/tests/testdata/README b/src/bin/dbutil/tests/testdata/README
new file mode 100644
index 0000000..83ce01f
--- /dev/null
+++ b/src/bin/dbutil/tests/testdata/README
@@ -0,0 +1,41 @@
+The versioning of BIND 10 databases to date has not been the best:
+
+The original database is known here as the "old V1" schema. It had a
+schema_version table, with the single "version" value set to 1.
+
+The schema was then updated with a "diffs" table. This is referred to
+here as the "new V1" schema.
+
+The Spring 2012 release of BIND 10 modified the schema. The
+schema_version table was updated to include a "minor" column, holding the
+minor version number. Other changes to the database included redefining
+"STRING" columns as "TEXT" columns. This is referred to as the "V2.0
+schema".
+
+The following test data files are present:
+
+empty_schema.sqlite3: A database conforming to the new V1 schema.
+However, there is nothing in the schema_version table.
+
+empty_v1.sqlite3: A database conforming to the new V1 schema.
+The database is empty, except for the schema_version table, where the
+"version" column is set to 1.
+
+empty_version.sqlite3: A database conforming to the V2.0 schema but without
+anything in the schema_version table.
+
+no_schema.sqlite3: A valid SQLite3 database, but without a schema_version
+table.
+
+old_v1.sqlite3: A valid SQLite3 database conforming to the old V1 schema.
+It does not have a diffs table.
+
+invalid_v1.sqlite3: A valid SQLite3 database that, although the schema
+is marked as V1, does not have the nsec3 table.
+
+new_v1.sqlite3: A valid SQLite3 database with data in all the tables
+(the single rows in the nsec3 and diffs tables make no semantic sense,
+but they are valid).
+
+too_many_version.sqlite3: A database conforming to the V2.0 schema but with
+too many rows in the schema_version table.
diff --git a/src/bin/dbutil/tests/testdata/corrupt.sqlite3 b/src/bin/dbutil/tests/testdata/corrupt.sqlite3
new file mode 100644
index 0000000..69683b7
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/corrupt.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/empty_schema.sqlite3 b/src/bin/dbutil/tests/testdata/empty_schema.sqlite3
new file mode 100644
index 0000000..b803149
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/empty_schema.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/empty_v1.sqlite3 b/src/bin/dbutil/tests/testdata/empty_v1.sqlite3
new file mode 100644
index 0000000..5ad2136
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/empty_v1.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/empty_version.sqlite3 b/src/bin/dbutil/tests/testdata/empty_version.sqlite3
new file mode 100644
index 0000000..b820fa9
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/empty_version.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/invalid_v1.sqlite3 b/src/bin/dbutil/tests/testdata/invalid_v1.sqlite3
new file mode 100644
index 0000000..e411fd0
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/invalid_v1.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/new_v1.sqlite3 b/src/bin/dbutil/tests/testdata/new_v1.sqlite3
new file mode 100644
index 0000000..9a885a4
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/new_v1.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/no_schema.sqlite3 b/src/bin/dbutil/tests/testdata/no_schema.sqlite3
new file mode 100644
index 0000000..9dd0614
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/no_schema.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/old_v1.sqlite3 b/src/bin/dbutil/tests/testdata/old_v1.sqlite3
new file mode 100644
index 0000000..32dbb9b
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/old_v1.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/too_many_version.sqlite3 b/src/bin/dbutil/tests/testdata/too_many_version.sqlite3
new file mode 100644
index 0000000..5dc8ae3
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/too_many_version.sqlite3 differ
diff --git a/src/bin/dbutil/tests/testdata/v2_0.sqlite3 b/src/bin/dbutil/tests/testdata/v2_0.sqlite3
new file mode 100644
index 0000000..18784fd
Binary files /dev/null and b/src/bin/dbutil/tests/testdata/v2_0.sqlite3 differ
diff --git a/src/bin/host/host.cc b/src/bin/host/host.cc
index f1bb415..a5c6522 100644
--- a/src/bin/host/host.cc
+++ b/src/bin/host/host.cc
@@ -232,7 +232,7 @@ main(int argc, char* argv[]) {
argv += optind;
if (argc < 1) {
- cout << "Usage: host [-adprv] [-c class] [-t type] hostname [server]\n";
+ cout << "Usage: host [-adrv] [-c class] [-p port] [-t type] hostname [server]\n";
exit(1);
}
diff --git a/src/bin/loadzone/b10-loadzone.8 b/src/bin/loadzone/b10-loadzone.8
index d563ff2..cf1c26b 100644
--- a/src/bin/loadzone/b10-loadzone.8
+++ b/src/bin/loadzone/b10-loadzone.8
@@ -2,12 +2,12 @@
.\" Title: b10-loadzone
.\" Author: [see the "AUTHORS" section]
.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\" Date: March 8, 2010
+.\" Date: March 26, 2012
.\" Manual: BIND10
.\" Source: BIND10
.\" Language: English
.\"
-.TH "B10\-LOADZONE" "8" "March 8, 2010" "BIND10" "BIND10"
+.TH "B10\-LOADZONE" "8" "March 26, 2012" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
b10-loadzone \- Load DNS Zone File
.SH "SYNOPSIS"
.HP \w'\fBb10\-loadzone\fR\ 'u
-\fBb10\-loadzone\fR [\fB\-d\ \fR\fB\fIdatabase\fR\fR] [\fB\-o\ \fR\fB\fIorigin\fR\fR] [filename]
+\fBb10\-loadzone\fR [\fB\-d\ \fR\fB\fIdatabase\fR\fR] [\fB\-o\ \fR\fB\fIorigin\fR\fR] {filename}
.SH "DESCRIPTION"
.PP
The
diff --git a/src/bin/loadzone/b10-loadzone.xml b/src/bin/loadzone/b10-loadzone.xml
index 25e23a5..8c41e54 100644
--- a/src/bin/loadzone/b10-loadzone.xml
+++ b/src/bin/loadzone/b10-loadzone.xml
@@ -20,7 +20,7 @@
<refentry>
<refentryinfo>
- <date>March 8, 2010</date>
+ <date>March 26, 2012</date>
</refentryinfo>
<refmeta>
@@ -46,7 +46,7 @@
<command>b10-loadzone</command>
<arg><option>-d <replaceable class="parameter">database</replaceable></option></arg>
<arg><option>-o <replaceable class="parameter">origin</replaceable></option></arg>
- <arg chose="req">filename</arg>
+ <arg choice="req">filename</arg>
</cmdsynopsis>
</refsynopsisdiv>
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
index 3538e3d..1008249 100644
Binary files a/src/bin/xfrin/tests/testdata/example.com.sqlite3 and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index a5c92ab..b88d6a9 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -139,6 +139,9 @@ class MockCC(MockModuleCCSession):
if identifier == "zones/use_ixfr":
return False
+ def remove_remote_config(self, module_name):
+ pass
+
class MockDataSourceClient():
'''A simple mock data source client.
@@ -2574,6 +2577,134 @@ class TestXfrin(unittest.TestCase):
self.common_ixfr_setup('refresh', False)
self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+class TestXfrinMemoryZones(unittest.TestCase):
+ def setUp(self):
+ self.xfr = MockXfrin()
+ # Configuration snippet containing 2 memory datasources,
+ # one for IN and one for CH. Both contain a zone 'example.com';
+ # the IN ds also contains a zone example2.com, and a zone example3.com,
+ # which is of file type 'text' (and hence should be ignored).
+ self.config = { 'datasources': [
+ { 'type': 'memory',
+ 'class': 'IN',
+ 'zones': [
+ { 'origin': 'example.com',
+ 'filetype': 'sqlite3' },
+ { 'origin': 'EXAMPLE2.com.',
+ 'filetype': 'sqlite3' },
+ { 'origin': 'example3.com',
+ 'filetype': 'text' }
+ ]
+ },
+ { 'type': 'memory',
+ 'class': 'ch',
+ 'zones': [
+ { 'origin': 'example.com',
+ 'filetype': 'sqlite3' }
+ ]
+ }
+ ] }
+
+ def test_updates(self):
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ # add them all
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "CH"))
+
+ # Remove the CH data source from the self.config snippet, and update
+ del self.config['datasources'][1]
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ # Remove example2.com from the datasource, and update
+ del self.config['datasources'][0]['zones'][1]
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ # If 'datasources' is not in the config update (i.e. the datasources
+ # part of the config has not changed), no difference should be found
+ self.xfr._set_memory_zones({}, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ # If the datasource's zone list becomes empty, everything should be removed
+ self.config['datasources'][0]['zones'] = []
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ def test_normalization(self):
+ self.xfr._set_memory_zones(self.config, None)
+ # make sure it is case insensitive, root-dot-insensitive,
+ # and supports CLASSXXX notation
+ self.assertTrue(self.xfr._is_memory_zone("EXAMPLE.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "in"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com.", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "CLASS3"))
+
+ def test_bad_name(self):
+ # First set it to some config
+ self.xfr._set_memory_zones(self.config, None)
+
+ # Error checking; bad owner name should result in no changes
+ self.config['datasources'][1]['zones'][0]['origin'] = ".."
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "CH"))
+
+ def test_bad_class(self):
+ # First set it to some config
+ self.xfr._set_memory_zones(self.config, None)
+
+ # Error checking; a bad class should result in no changes
+ self.config['datasources'][1]['class'] = "Foo"
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "CH"))
+
+ def test_no_filetype(self):
+ # omitting the filetype should leave that zone out, but not
+ # the rest
+ del self.config['datasources'][1]['zones'][0]['filetype']
+ self.xfr._set_memory_zones(self.config, None)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example.com", "CH"))
+
+ def test_class_filetype(self):
+ # omitting the class should have it default to what is in the
+ # specfile for Auth.
+ AuthConfigData = isc.config.config_data.ConfigData(
+ isc.config.module_spec_from_file(xfrin.AUTH_SPECFILE_LOCATION))
+ del self.config['datasources'][0]['class']
+ self.xfr._set_memory_zones(self.config, AuthConfigData)
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example2.com", "IN"))
+ self.assertFalse(self.xfr._is_memory_zone("example3.com", "IN"))
+ self.assertTrue(self.xfr._is_memory_zone("example.com", "CH"))
+
def raise_interrupt():
raise KeyboardInterrupt()
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 863c5b9..58713be 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -38,6 +38,11 @@ from isc.log_messages.xfrin_messages import *
isc.log.init("b10-xfrin")
logger = isc.log.Logger("xfrin")
+# Pending system-wide debug level definitions; the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
try:
from pydnspp import *
except ImportError as e:
@@ -1246,6 +1251,11 @@ class Xfrin:
def __init__(self):
self._max_transfers_in = 10
self._zones = {}
+ # This is a set of (zone, class) tuples (both as strings),
+ # representing the in-memory zones maintained by Xfrin. It
+ # is used to trigger Auth/in-memory so that it reloads
+ # zones when they have been transferred in
+ self._memory_zones = set()
self._cc_setup()
self.recorder = XfrinRecorder()
self._shutdown_event = threading.Event()
@@ -1264,6 +1274,8 @@ class Xfrin:
self._module_cc.start()
config_data = self._module_cc.get_full_config()
self.config_handler(config_data)
+ self._module_cc.add_remote_config(AUTH_SPECFILE_LOCATION,
+ self._auth_config_handler)
def _cc_check_command(self):
'''This is a straightforward wrapper for cc.check_command,
@@ -1310,10 +1322,78 @@ class Xfrin:
return create_answer(0)
+ def _auth_config_handler(self, new_config, config_data):
+ # Config handler for changes in Auth configuration
+ self._set_db_file()
+ self._set_memory_zones(new_config, config_data)
+
+ def _clear_memory_zones(self):
+ """Clears the memory_zones set; called before processing the
+ changed list of memory datasource zones that have file type
+ sqlite3"""
+ self._memory_zones.clear()
+
+ def _is_memory_zone(self, zone_name_str, zone_class_str):
+ """Returns true if the given zone/class combination is configured
+ in the in-memory datasource of the Auth process with file type
+ 'sqlite3'.
+ Note: this method is not thread-safe. We are considering
+ changing the threaded model here, but if we do not, take
+ care in accessing and updating the memory zone set (or add
+ locks)
+ """
+ # Normalize them first; if either conversion fails, return False
+ # (they won't be in the set anyway)
+ try:
+ zone_name_str = Name(zone_name_str).to_text().lower()
+ zone_class_str = RRClass(zone_class_str).to_text()
+ except Exception:
+ return False
+ return (zone_name_str, zone_class_str) in self._memory_zones
+
+ def _set_memory_zones(self, new_config, config_data):
+ """Part of the _auth_config_handler function, keeps an internal set
+ of zones in the datasources config subset that have 'sqlite3' as
+ their file type.
+ Note: this method is not thread-safe. We are considering
+ changing the threaded model here, but if we do not, take
+ care in accessing and updating the memory zone set (or add
+ locks)
+ """
+ # walk through the data and collect the memory zones
+ # If this causes any exception, assume we were passed bad data
+ # and keep the original set
+ new_memory_zones = set()
+ try:
+ if "datasources" in new_config:
+ for datasource in new_config["datasources"]:
+ if "class" in datasource:
+ ds_class = RRClass(datasource["class"])
+ else:
+ # Get the default
+ ds_class = RRClass(config_data.get_default_value(
+ "datasources/class"))
+ if datasource["type"] == "memory":
+ for zone in datasource["zones"]:
+ if "filetype" in zone and \
+ zone["filetype"] == "sqlite3":
+ zone_name = Name(zone["origin"])
+ zone_name_str = zone_name.to_text().lower()
+ new_memory_zones.add((zone_name_str,
+ ds_class.to_text()))
+ # Ok, we can use the data, update our list
+ self._memory_zones = new_memory_zones
+ except Exception:
+ # Something is wrong with the data. If this data even reached us,
+ # we cannot do more than assume the real module has logged and
+ # reported an error. Keep the old set.
+ return
+
def shutdown(self):
''' shutdown the xfrin process. the thread which is doing xfrin should be
terminated.
'''
+ self._module_cc.remove_remote_config(AUTH_SPECFILE_LOCATION)
self._module_cc.send_stopping()
self._shutdown_event.set()
main_thread = threading.currentThread()
@@ -1446,20 +1526,19 @@ class Xfrin:
return (addr.family, socket.SOCK_STREAM, (str(addr), port))
def _get_db_file(self):
- #TODO, the db file path should be got in auth server's configuration
- # if we need access to this configuration more often, we
- # should add it on start, and not remove it here
- # (or, if we have writable ds, we might not need this in
- # the first place)
- self._module_cc.add_remote_config(AUTH_SPECFILE_LOCATION)
- db_file, is_default = self._module_cc.get_remote_config_value("Auth", "database_file")
+ return self._db_file
+
+ def _set_db_file(self):
+ db_file, is_default =\
+ self._module_cc.get_remote_config_value("Auth", "database_file")
if is_default and "B10_FROM_BUILD" in os.environ:
- # this too should be unnecessary, but currently the
- # 'from build' override isn't stored in the config
- # (and we don't have writable datasources yet)
- db_file = os.environ["B10_FROM_BUILD"] + os.sep + "bind10_zones.sqlite3"
- self._module_cc.remove_remote_config(AUTH_SPECFILE_LOCATION)
- return db_file
+ # Override the local database setting if it is the default and we
+ # are running from the source tree.
+ # This should be hidden inside the data source library and/or
+ # done as a configuration, and this special case should be gone.
+ db_file = os.environ["B10_FROM_BUILD"] + os.sep +\
+ "bind10_zones.sqlite3"
+ self._db_file = db_file
def publish_xfrin_news(self, zone_name, zone_class, xfr_result):
'''Send command to xfrout/zone manager module.
@@ -1502,6 +1581,7 @@ class Xfrin:
logger.error(XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER, ZONE_MANAGER_MODULE_NAME)
def startup(self):
+ logger.debug(DBG_PROCESS, XFRIN_STARTED)
while not self._shutdown_event.is_set():
self._cc_check_command()
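As a rough guide to what the new _set_memory_zones()/_is_memory_zone() code above does: each memory datasource entry with filetype 'sqlite3' is reduced to a normalized (zone name, class) pair and kept in a set, so later transfers can check membership cheaply. Below is a minimal standalone sketch of that idea; it is illustrative only and uses a hypothetical normalize() helper instead of the pydnspp Name/RRClass classes the real code relies on.
    # Sketch only -- not part of this patch. The real xfrin code uses
    # pydnspp's Name and RRClass for normalization and keeps the old set
    # when the new config turns out to be malformed.
    def normalize(zone_name, zone_class):
        # lower-case the name, make it end with the root dot,
        # and upper-case the class mnemonic
        name = zone_name.lower()
        if not name.endswith('.'):
            name += '.'
        return (name, zone_class.upper())

    def collect_memory_zones(config, default_class='IN'):
        zones = set()
        for ds in config.get('datasources', []):
            if ds.get('type') != 'memory':
                continue
            ds_class = ds.get('class', default_class)
            for zone in ds.get('zones', []):
                if zone.get('filetype') == 'sqlite3':
                    zones.add(normalize(zone['origin'], ds_class))
        return zones

    # Usage, mirroring the membership test done by _is_memory_zone():
    zones = collect_memory_zones({'datasources': [
        {'type': 'memory', 'class': 'IN',
         'zones': [{'origin': 'Example.COM', 'filetype': 'sqlite3'}]}]})
    print(normalize('example.com.', 'in') in zones)  # True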
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index eae1c69..25a1fc1 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -129,8 +129,9 @@ zone is not known to the system. This may indicate that the configuration
for xfrin is incomplete, or there was a typographical error in the
zone name in the configuration.
-% XFRIN_STARTING starting resolver with command line '%1'
-An informational message, this is output when the resolver starts up.
+% XFRIN_STARTED xfrin started
+This informational message is output by xfrin when all initialization
+has been completed and it is entering its main loop.
% XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the xfrin daemon. The
diff --git a/src/bin/xfrout/b10-xfrout.8 b/src/bin/xfrout/b10-xfrout.8
index 483e2c0..b3200b5 100644
--- a/src/bin/xfrout/b10-xfrout.8
+++ b/src/bin/xfrout/b10-xfrout.8
@@ -9,6 +9,15 @@
.\"
.TH "B10\-XFROUT" "8" "March 16\&. 2012" "BIND10" "BIND10"
.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
index 9eb14f1..a594b44 100644
Binary files a/src/bin/xfrout/tests/testdata/test.sqlite3 and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 8f236ec..4dd12ce 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -40,6 +40,12 @@ from isc.log_messages.xfrout_messages import *
isc.log.init("b10-xfrout")
logger = isc.log.Logger("xfrout")
+
+# Pending system-wide debug level definitions; the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
DBG_XFROUT_TRACE = logger.DBGLVL_TRACE_BASIC
try:
@@ -1002,6 +1008,7 @@ class XfroutServer:
def run(self):
'''Get and process all commands sent from cfgmgr or other modules. '''
+ logger.debug(DBG_PROCESS, XFROUT_STARTED)
while not self._shutdown_event.is_set():
self._cc.check_command(False)
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index fcc2e59..9996a5a 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -133,6 +133,10 @@ be a result of rare local error such as memory allocation failure and
shouldn't happen under normal conditions. The error is included in the
log message.
+% XFROUT_STARTED xfrout started
+This informational message is output by xfrout when all initialization
+has been completed and it is entering its main loop.
+
% XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the xfrout daemon. The
daemon will now shut down.
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 8c6b904..b60fae7 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -20,6 +20,7 @@ endif
for pytest in $(PYTESTS) ; do \
echo Running test: $$pytest ; \
$(LIBRARY_PATH_PLACEHOLDER) \
+ TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/zonemgr/tests/ \
B10_FROM_BUILD=$(abs_top_builddir) \
PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index 29924c8..548d921 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -35,6 +35,8 @@ LOWERBOUND_RETRY = 5
REFRESH_JITTER = 0.10
RELOAD_JITTER = 0.75
+TEST_SQLITE3_DBFILE = os.getenv("TESTDATAOBJDIR") + '/initdb.file'
+
class ZonemgrTestException(Exception):
pass
@@ -57,7 +59,7 @@ class FakeCCSession(isc.config.ConfigData, MockModuleCCSession):
def get_remote_config_value(self, module_name, identifier):
if module_name == "Auth" and identifier == "database_file":
- return "initdb.file", False
+ return TEST_SQLITE3_DBFILE, False
else:
return "unknown", False
@@ -81,7 +83,7 @@ class MyZonemgrRefresh(ZonemgrRefresh):
return None
sqlite3_ds.get_zone_soa = get_zone_soa
- ZonemgrRefresh.__init__(self, MySession(), "initdb.file",
+ ZonemgrRefresh.__init__(self, MySession(), TEST_SQLITE3_DBFILE,
self._slave_socket, FakeCCSession())
current_time = time.time()
self._zonemgr_refresh_info = {
@@ -99,11 +101,18 @@ class MyZonemgrRefresh(ZonemgrRefresh):
class TestZonemgrRefresh(unittest.TestCase):
def setUp(self):
+ if os.path.exists(TEST_SQLITE3_DBFILE):
+ os.unlink(TEST_SQLITE3_DBFILE)
self.stderr_backup = sys.stderr
sys.stderr = open(os.devnull, 'w')
self.zone_refresh = MyZonemgrRefresh()
self.cc_session = FakeCCSession()
+ def tearDown(self):
+ if os.path.exists(TEST_SQLITE3_DBFILE):
+ os.unlink(TEST_SQLITE3_DBFILE)
+ sys.stderr = self.stderr_backup
+
def test_random_jitter(self):
max = 100025.120
jitter = 0
@@ -602,13 +611,10 @@ class TestZonemgrRefresh(unittest.TestCase):
self.zone_refresh.update_config_data,
config, self.cc_session)
- def tearDown(self):
- sys.stderr= self.stderr_backup
-
class MyZonemgr(Zonemgr):
def __init__(self):
- self._db_file = "initdb.file"
+ self._db_file = TEST_SQLITE3_DBFILE
self._zone_refresh = None
self._shutdown_event = threading.Event()
self._cc = MySession()
@@ -628,8 +634,14 @@ class MyZonemgr(Zonemgr):
class TestZonemgr(unittest.TestCase):
def setUp(self):
+ if os.path.exists(TEST_SQLITE3_DBFILE):
+ os.unlink(TEST_SQLITE3_DBFILE)
self.zonemgr = MyZonemgr()
+ def tearDown(self):
+ if os.path.exists(TEST_SQLITE3_DBFILE):
+ os.unlink(TEST_SQLITE3_DBFILE)
+
def test_config_handler(self):
config_data1 = {
"lowerbound_refresh" : 60,
@@ -650,8 +662,8 @@ class TestZonemgr(unittest.TestCase):
self.zonemgr.config_handler(config_data3)
self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
# The zone doesn't exist in the database, simply skip loading soa for it and log a warning
- self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
- FakeCCSession())
+ self.zonemgr._zone_refresh = ZonemgrRefresh(None, TEST_SQLITE3_DBFILE,
+ None, FakeCCSession())
config_data1["secondary_zones"] = [{"name": "nonexistent.example",
"class": "IN"}]
self.assertEqual(self.zonemgr.config_handler(config_data1),
@@ -663,7 +675,7 @@ class TestZonemgr(unittest.TestCase):
self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
def test_get_db_file(self):
- self.assertEqual("initdb.file", self.zonemgr.get_db_file())
+ self.assertEqual(TEST_SQLITE3_DBFILE, self.zonemgr.get_db_file())
def test_parse_cmd_params(self):
params1 = {"zone_name" : "example.com.", "zone_class" : "CH", "master" : "127.0.0.1"}
@@ -691,9 +703,6 @@ class TestZonemgr(unittest.TestCase):
self.zonemgr.run()
self.assertTrue(self.zonemgr._module_cc.stopped)
- def tearDown(self):
- pass
-
if __name__== "__main__":
isc.log.resetUnitTestRootLogger()
unittest.main()
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index 7b16f1b..87589a8 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -44,6 +44,11 @@ from isc.log_messages.zonemgr_messages import *
isc.log.init("b10-zonemgr")
logger = isc.log.Logger("zonemgr")
+# Pending system-wide debug level definitions; the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
# Constants for debug levels.
DBG_START_SHUT = logger.DBGLVL_START_SHUT
DBG_ZONEMGR_COMMAND = logger.DBGLVL_COMMAND
@@ -657,6 +662,7 @@ class Zonemgr:
return answer
def run(self):
+ logger.debug(DBG_PROCESS, ZONEMGR_STARTED)
self.running = True
try:
while not self._shutdown_event.is_set():
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
index d33e263..c866b79 100644
--- a/src/bin/zonemgr/zonemgr_messages.mes
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -67,6 +67,10 @@ zone manager to record the master server for the zone and start a timer;
when the timer expires, the master will be polled to see if it contains
new data.
+% ZONEMGR_STARTED zonemgr started
+This informational message is output by zonemgr when all initialization
+has been completed and it is entering its main loop.
+
% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
This is a debug message indicating that the zone manager has received
a SHUTDOWN command over the command channel from the Boss process.
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index 164c549..ff4a79a 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -2,21 +2,12 @@
// the following two will suppress, depending on the cppcheck version
debug
missingInclude
-// This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:61
-// Intentional self-comparisons
-duplicateExpression:src/lib/dns/tests/name_unittest.cc:569
-duplicateExpression:src/lib/dns/tests/name_unittest.cc:580
-duplicateExpression:src/lib/dns/tests/rrttl_unittest.cc:164
-duplicateExpression:src/lib/dns/tests/rrttl_unittest.cc:175
-duplicateExpression:src/lib/dns/tests/name_unittest.cc:568
-duplicateExpression:src/lib/dns/tests/name_unittest.cc:579
-
-// Intentional self-comparisons
-uselessCallsCompare:src/lib/dns/tests/rdata_dhcid_unittest.cc:96
-uselessCallsCompare:src/lib/dns/tests/rdata_in_a_unittest.cc:98
-uselessCallsCompare:src/lib/dns/tests/rdata_in_aaaa_unittest.cc:94
-uselessCallsCompare:src/lib/dns/tests/rdata_mx_unittest.cc:104
-uselessCallsCompare:src/lib/dns/tests/rdata_unittest.cc:254
-uselessCallsCompare:src/lib/dns/tests/rdata_unittest.cc:253
+// Please don't add any suppressions here. We now use inline
+// suppressions (in the .cc files) so that we don't have to
+// maintain line numbers in this file.
+//
+// See the cppcheck manual for syntax. It is something like:
+//
+// // cppcheck-suppress duplicateExpression
+// EXPECT_FALSE(small_name < small_name);
diff --git a/src/lib/asiodns/dns_service.cc b/src/lib/asiodns/dns_service.cc
index ee7cf74..2cfdea5 100644
--- a/src/lib/asiodns/dns_service.cc
+++ b/src/lib/asiodns/dns_service.cc
@@ -14,30 +14,19 @@
#include <config.h>
-#include <unistd.h> // for some IPC/network system calls
-#include <netinet/in.h>
-#include <sys/socket.h>
-
-#include <boost/lexical_cast.hpp>
-
-#include <log/dummylog.h>
-
#include <exceptions/exceptions.h>
-#include <asio.hpp>
#include <dns_service.h>
+
#include <asiolink/io_service.h>
+
+#include <asio.hpp> // xxx_server.h requires this to be included first
#include <tcp_server.h>
#include <udp_server.h>
#include <sync_udp_server.h>
-#include <log/dummylog.h>
-
-#include <boost/lexical_cast.hpp>
#include <boost/foreach.hpp>
-using isc::log::dlog;
-
using namespace isc::asiolink;
namespace isc {
@@ -46,29 +35,13 @@ namespace asiodns {
class DNSLookup;
class DNSAnswer;
-namespace {
-
-asio::ip::address
-convertAddr(const std::string& address) {
- asio::error_code err;
- asio::ip::address addr = asio::ip::address::from_string(address, err);
- if (err) {
- isc_throw(IOError, "Invalid IP address '" << &address << "': "
- << err.message());
- }
- return (addr);
-}
-
-}
-
-
class DNSServiceImpl {
public:
- DNSServiceImpl(IOService& io_service, const char& port,
- const asio::ip::address* v4addr,
- const asio::ip::address* v6addr,
- SimpleCallback* checkin, DNSLookup* lookup,
- DNSAnswer* answe);
+ DNSServiceImpl(IOService& io_service, SimpleCallback* checkin,
+ DNSLookup* lookup, DNSAnswer* answer) :
+ io_service_(io_service), checkin_(checkin), lookup_(lookup),
+ answer_(answer)
+ {}
IOService& io_service_;
@@ -77,9 +50,9 @@ public:
typedef boost::shared_ptr<TCPServer> TCPServerPtr;
typedef boost::shared_ptr<DNSServer> DNSServerPtr;
std::vector<DNSServerPtr> servers_;
- SimpleCallback *checkin_;
- DNSLookup *lookup_;
- DNSAnswer *answer_;
+ SimpleCallback* checkin_;
+ DNSLookup* lookup_;
+ DNSAnswer* answer_;
template<class Ptr, class Server> void addServerFromFD(int fd, int af) {
Ptr server(new Server(io_service_.get_io_service(), fd, af, checkin_,
@@ -87,107 +60,12 @@ public:
(*server)();
servers_.push_back(server);
}
-
- void addServer(uint16_t port, const asio::ip::address& address) {
- try {
- dlog(std::string("Initialize TCP server at ") +
- address.to_string() + ":" +
- boost::lexical_cast<std::string>(port));
- TCPServerPtr tcpServer(new TCPServer(io_service_.get_io_service(),
- address, port, checkin_,
- lookup_, answer_));
- (*tcpServer)();
- servers_.push_back(tcpServer);
- dlog(std::string("Initialize UDP server at ") +
- address.to_string() + ":" +
- boost::lexical_cast<std::string>(port));
- UDPServerPtr udpServer(new UDPServer(io_service_.get_io_service(),
- address, port, checkin_, lookup_, answer_));
- (*udpServer)();
- servers_.push_back(udpServer);
- } catch (const asio::system_error& err) {
- // We need to catch and convert any ASIO level exceptions.
- // This can happen for unavailable address, binding a privilege port
- // without the privilege, etc.
- isc_throw(IOError, "Failed to initialize network servers: " <<
- err.what());
- }
- }
- void addServer(const char& port, const asio::ip::address& address) {
- uint16_t portnum;
- try {
- // XXX: SunStudio with stlport4 doesn't reject some invalid
- // representation such as "-1" by lexical_cast<uint16_t>, so
- // we convert it into a signed integer of a larger size and perform
- // range check ourselves.
- const int32_t portnum32 = boost::lexical_cast<int32_t>(&port);
- if (portnum32 < 0 || portnum32 > 65535) {
- isc_throw(IOError, "Invalid port number '" << &port);
- }
- portnum = portnum32;
- } catch (const boost::bad_lexical_cast& ex) {
- isc_throw(IOError, "Invalid port number '" << &port << "': " <<
- ex.what());
- }
- addServer(portnum, address);
- }
};
-DNSServiceImpl::DNSServiceImpl(IOService& io_service,
- const char& port,
- const asio::ip::address* const v4addr,
- const asio::ip::address* const v6addr,
- SimpleCallback* checkin,
- DNSLookup* lookup,
- DNSAnswer* answer) :
- io_service_(io_service),
- checkin_(checkin),
- lookup_(lookup),
- answer_(answer)
-{
-
- if (v4addr) {
- addServer(port, *v4addr);
- }
- if (v6addr) {
- addServer(port, *v6addr);
- }
-}
-
-DNSService::DNSService(IOService& io_service,
- const char& port, const char& address,
- SimpleCallback* checkin,
- DNSLookup* lookup,
- DNSAnswer* answer) :
- impl_(new DNSServiceImpl(io_service, port, NULL, NULL, checkin, lookup,
- answer)),
- io_service_(io_service)
-{
- addServer(port, &address);
-}
-
-DNSService::DNSService(IOService& io_service,
- const char& port,
- const bool use_ipv4, const bool use_ipv6,
- SimpleCallback* checkin,
- DNSLookup* lookup,
- DNSAnswer* answer) :
- impl_(NULL), io_service_(io_service)
-{
- const asio::ip::address v4addr_any =
- asio::ip::address(asio::ip::address_v4::any());
- const asio::ip::address* const v4addrp = use_ipv4 ? &v4addr_any : NULL;
- const asio::ip::address v6addr_any =
- asio::ip::address(asio::ip::address_v6::any());
- const asio::ip::address* const v6addrp = use_ipv6 ? &v6addr_any : NULL;
- impl_ = new DNSServiceImpl(io_service, port, v4addrp, v6addrp, checkin,
- lookup, answer);
-}
-
DNSService::DNSService(IOService& io_service, SimpleCallback* checkin,
- DNSLookup* lookup, DNSAnswer *answer) :
- impl_(new DNSServiceImpl(io_service, *"0", NULL, NULL, checkin, lookup,
- answer)), io_service_(io_service)
+ DNSLookup* lookup, DNSAnswer *answer) :
+ impl_(new DNSServiceImpl(io_service, checkin, lookup, answer)),
+ io_service_(io_service)
{
}
@@ -195,16 +73,6 @@ DNSService::~DNSService() {
delete impl_;
}
-void
-DNSService::addServer(const char& port, const std::string& address) {
- impl_->addServer(port, convertAddr(address));
-}
-
-void
-DNSService::addServer(uint16_t port, const std::string& address) {
- impl_->addServer(port, convertAddr(address));
-}
-
void DNSService::addServerTCPFromFD(int fd, int af) {
impl_->addServerFromFD<DNSServiceImpl::TCPServerPtr, TCPServer>(fd, af);
}
diff --git a/src/lib/asiodns/dns_service.h b/src/lib/asiodns/dns_service.h
index 161658f..8f2f6d7 100644
--- a/src/lib/asiodns/dns_service.h
+++ b/src/lib/asiodns/dns_service.h
@@ -112,40 +112,14 @@ private:
static const unsigned int SERVER_DEFINED_FLAGS = 1;
public:
- /// \brief The constructor with a specific IP address and port on which
- /// the services listen on.
- ///
- /// \param io_service The IOService to work with
- /// \param port the port to listen on
- /// \param address the IP address to listen on
- /// \param checkin Provider for cc-channel events (see \c SimpleCallback)
- /// \param lookup The lookup provider (see \c DNSLookup)
- /// \param answer The answer provider (see \c DNSAnswer)
- DNSService(asiolink::IOService& io_service, const char& port,
- const char& address, isc::asiolink::SimpleCallback* checkin,
- DNSLookup* lookup, DNSAnswer* answer);
-
- /// \brief The constructor with a specific port on which the services
- /// listen on.
+ /// \brief The constructor without any servers.
///
- /// It effectively listens on "any" IPv4 and/or IPv6 addresses.
- /// IPv4/IPv6 services will be available if and only if \c use_ipv4
- /// or \c use_ipv6 is \c true, respectively.
+ /// Use addServerTCPFromFD() or addServerUDPFromFD() to add some servers.
///
/// \param io_service The IOService to work with
- /// \param port the port to listen on
- /// \param use_ipv4 If true, listen on ipv4 'any'
- /// \param use_ipv6 If true, listen on ipv6 'any'
/// \param checkin Provider for cc-channel events (see \c SimpleCallback)
/// \param lookup The lookup provider (see \c DNSLookup)
/// \param answer The answer provider (see \c DNSAnswer)
- DNSService(asiolink::IOService& io_service, const char& port,
- const bool use_ipv4, const bool use_ipv6,
- isc::asiolink::SimpleCallback* checkin, DNSLookup* lookup,
- DNSAnswer* answer);
- /// \brief The constructor without any servers.
- ///
- /// Use addServer() to add some servers.
DNSService(asiolink::IOService& io_service,
isc::asiolink::SimpleCallback* checkin,
DNSLookup* lookup, DNSAnswer* answer);
@@ -154,10 +128,6 @@ public:
virtual ~DNSService();
//@}
- /// \brief Add another server to the service
- void addServer(uint16_t port, const std::string &address);
- void addServer(const char& port, const std::string& address);
-
/// \brief Add another TCP server/listener to the service from already
/// opened file descriptor
///
diff --git a/src/lib/asiodns/sync_udp_server.cc b/src/lib/asiodns/sync_udp_server.cc
index fb53fba..a31301d 100644
--- a/src/lib/asiodns/sync_udp_server.cc
+++ b/src/lib/asiodns/sync_udp_server.cc
@@ -38,29 +38,6 @@ using namespace isc::asiolink;
namespace isc {
namespace asiodns {
-SyncUDPServer::SyncUDPServer(asio::io_service& io_service,
- const asio::ip::address& addr,
- const uint16_t port,
- asiolink::SimpleCallback* checkin,
- DNSLookup* lookup, DNSAnswer* answer) :
- output_buffer_(new isc::util::OutputBuffer(0)),
- query_(new isc::dns::Message(isc::dns::Message::PARSE)),
- answer_(new isc::dns::Message(isc::dns::Message::RENDER)),
- io_(io_service), checkin_callback_(checkin), lookup_callback_(lookup),
- answer_callback_(answer), stopped_(false)
-{
- // We must use different instantiations for v4 and v6;
- // otherwise ASIO will bind to both
- asio::ip::udp proto = addr.is_v4() ? asio::ip::udp::v4() :
- asio::ip::udp::v6();
- socket_.reset(new asio::ip::udp::socket(io_service, proto));
- socket_->set_option(asio::socket_base::reuse_address(true));
- if (addr.is_v6()) {
- socket_->set_option(asio::ip::v6_only(true));
- }
- socket_->bind(asio::ip::udp::endpoint(addr, port));
-}
-
SyncUDPServer::SyncUDPServer(asio::io_service& io_service, const int fd,
const int af, asiolink::SimpleCallback* checkin,
DNSLookup* lookup, DNSAnswer* answer) :
diff --git a/src/lib/asiodns/sync_udp_server.h b/src/lib/asiodns/sync_udp_server.h
index f21d3e5..9718422 100644
--- a/src/lib/asiodns/sync_udp_server.h
+++ b/src/lib/asiodns/sync_udp_server.h
@@ -44,19 +44,6 @@ class SyncUDPServer : public DNSServer, public boost::noncopyable {
public:
/// \brief Constructor
/// \param io_service the asio::io_service to work with
- /// \param addr the IP address to listen for queries on
- /// \param port the port to listen for queries on
- /// \param checkin the callbackprovider for non-DNS events
- /// \param lookup the callbackprovider for DNS lookup events
- /// \param answer the callbackprovider for DNS answer events
- explicit SyncUDPServer(asio::io_service& io_service,
- const asio::ip::address& addr, const uint16_t port,
- isc::asiolink::SimpleCallback* checkin = NULL,
- DNSLookup* lookup = NULL,
- DNSAnswer* answer = NULL);
-
- /// \brief Constructor
- /// \param io_service the asio::io_service to work with
/// \param fd the file descriptor of opened UDP socket
/// \param af address family, either AF_INET or AF_INET6
/// \param checkin the callbackprovider for non-DNS events
diff --git a/src/lib/asiodns/tcp_server.cc b/src/lib/asiodns/tcp_server.cc
index 3e97e14..8e4b4d6 100644
--- a/src/lib/asiodns/tcp_server.cc
+++ b/src/lib/asiodns/tcp_server.cc
@@ -47,28 +47,6 @@ namespace asiodns {
/// The following functions implement the \c TCPServer class.
///
/// The constructor
-TCPServer::TCPServer(io_service& io_service,
- const ip::address& addr, const uint16_t port,
- const SimpleCallback* checkin,
- const DNSLookup* lookup,
- const DNSAnswer* answer) :
- io_(io_service), done_(false),
- checkin_callback_(checkin), lookup_callback_(lookup),
- answer_callback_(answer)
-{
- tcp::endpoint endpoint(addr, port);
- acceptor_.reset(new tcp::acceptor(io_service));
- acceptor_->open(endpoint.protocol());
- // Set v6-only (we use a separate instantiation for v4,
- // otherwise asio will bind to both v4 and v6
- if (addr.is_v6()) {
- acceptor_->set_option(ip::v6_only(true));
- }
- acceptor_->set_option(tcp::acceptor::reuse_address(true));
- acceptor_->bind(endpoint);
- acceptor_->listen();
-}
-
TCPServer::TCPServer(io_service& io_service, int fd, int af,
const SimpleCallback* checkin,
const DNSLookup* lookup,
diff --git a/src/lib/asiodns/tcp_server.h b/src/lib/asiodns/tcp_server.h
index a75fddb..01695e4 100644
--- a/src/lib/asiodns/tcp_server.h
+++ b/src/lib/asiodns/tcp_server.h
@@ -37,12 +37,6 @@ namespace asiodns {
/// defined in coroutine.h.
class TCPServer : public virtual DNSServer, public virtual coroutine {
public:
- explicit TCPServer(asio::io_service& io_service,
- const asio::ip::address& addr, const uint16_t port,
- const isc::asiolink::SimpleCallback* checkin = NULL,
- const DNSLookup* lookup = NULL,
- const DNSAnswer* answer = NULL);
-
/// \brief Constructor
/// \param io_service the asio::io_service to work with
/// \param fd the file descriptor of opened TCP socket
diff --git a/src/lib/asiodns/tests/dns_server_unittest.cc b/src/lib/asiodns/tests/dns_server_unittest.cc
index 0064bba..a5e83c7 100644
--- a/src/lib/asiodns/tests/dns_server_unittest.cc
+++ b/src/lib/asiodns/tests/dns_server_unittest.cc
@@ -414,22 +414,7 @@ class DNSServerTestBase : public::testing::Test {
static bool io_service_is_time_out;
};
-// Initialization with name and port
-template<class UDPServerClass>
-class AddrPortInit : public DNSServerTestBase<UDPServerClass> {
-protected:
- AddrPortInit() {
- this->udp_server_ = new UDPServerClass(this->service,
- this->server_address_,
- server_port, this->checker_,
- this->lookup_, this->answer_);
- this->tcp_server_ = new TCPServer(this->service, this->server_address_,
- server_port, this->checker_,
- this->lookup_, this->answer_);
- }
-};
-
-// Initialization by the file descriptor
+// Initialization (by the file descriptor)
template<class UDPServerClass>
class FdInit : public DNSServerTestBase<UDPServerClass> {
private:
@@ -494,8 +479,7 @@ protected:
template<class Parent>
class DNSServerTest : public Parent { };
-typedef ::testing::Types<AddrPortInit<UDPServer>, AddrPortInit<SyncUDPServer>,
- FdInit<UDPServer>, FdInit<SyncUDPServer> >
+typedef ::testing::Types<FdInit<UDPServer>, FdInit<SyncUDPServer> >
ServerTypes;
TYPED_TEST_CASE(DNSServerTest, ServerTypes);
@@ -507,12 +491,6 @@ bool DNSServerTestBase<UDPServerClass>::io_service_is_time_out = false;
template<class UDPServerClass>
asio::io_service* DNSServerTestBase<UDPServerClass>::current_service(NULL);
-typedef ::testing::Types<AddrPortInit<SyncUDPServer>, FdInit<SyncUDPServer> >
- SyncTypes;
-template<class Parent>
-class SyncServerTest : public Parent { };
-TYPED_TEST_CASE(SyncServerTest, SyncTypes);
-
// Test whether server stopped successfully after client get response
// client will send query and start to wait for response, once client
// get response, udp server will be stopped, the io service won't quit
@@ -558,7 +536,8 @@ TYPED_TEST(DNSServerTest, stopUDPServerDuringPrepareAnswer) {
EXPECT_TRUE(this->serverStopSucceed());
}
-static void stopServerManyTimes(DNSServer *server, unsigned int times) {
+void
+stopServerManyTimes(DNSServer *server, unsigned int times) {
for (unsigned int i = 0; i < times; ++i) {
server->stop();
}
@@ -680,18 +659,19 @@ TYPED_TEST(DNSServerTestBase, DISABLED_invalidUDPFD) {
isc::asiolink::IOError);
}
-// Check it rejects some of the unsupported operatirons
-TYPED_TEST(SyncServerTest, unsupportedOps) {
- EXPECT_THROW(this->udp_server_->clone(), isc::Unexpected);
- EXPECT_THROW(this->udp_server_->asyncLookup(), isc::Unexpected);
+// A specialized test type for SyncUDPServer.
+typedef FdInit<SyncUDPServer> SyncServerTest;
+
+// Check it rejects some of the unsupported operations
+TEST_F(SyncServerTest, unsupportedOps) {
+ EXPECT_THROW(udp_server_->clone(), isc::Unexpected);
+ EXPECT_THROW(udp_server_->asyncLookup(), isc::Unexpected);
}
// Check it rejects forgotten resume (eg. insists that it is synchronous)
-TYPED_TEST(SyncServerTest, mustResume) {
- this->lookup_->allow_resume_ = false;
- ASSERT_THROW(this->testStopServerByStopper(this->udp_server_,
- this->udp_client_,
- this->lookup_),
+TEST_F(SyncServerTest, mustResume) {
+ lookup_->allow_resume_ = false;
+ ASSERT_THROW(testStopServerByStopper(udp_server_, udp_client_, lookup_),
isc::Unexpected);
}
diff --git a/src/lib/asiodns/tests/dns_service_unittest.cc b/src/lib/asiodns/tests/dns_service_unittest.cc
index beac02b..ce8eee9 100644
--- a/src/lib/asiodns/tests/dns_service_unittest.cc
+++ b/src/lib/asiodns/tests/dns_service_unittest.cc
@@ -39,98 +39,7 @@ using boost::lexical_cast;
namespace {
const char* const TEST_SERVER_PORT = "53535";
-const char* const TEST_CLIENT_PORT = "53536";
const char* const TEST_IPV6_ADDR = "::1";
-const char* const TEST_IPV4_ADDR = "127.0.0.1";
-
-TEST(IOServiceTest, badPort) {
- IOService io_service;
- EXPECT_THROW(DNSService(io_service, *"65536", true, false, NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *"53210.0", true, false, NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *"-1", true, false, NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *"domain", true, false, NULL, NULL, NULL), IOError);
-}
-
-TEST(IOServiceTest, badAddress) {
- IOService io_service;
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"192.0.2.1.1", NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"2001:db8:::1", NULL, NULL, NULL), IOError);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"localhost", NULL, NULL, NULL), IOError);
-}
-
-TEST(IOServiceTest, unavailableAddress) {
- IOService io_service;
- // These addresses should generally be unavailable as a valid local
- // address, although there's no guarantee in theory.
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"192.0.2.0", NULL, NULL, NULL), IOError);
-
- // Some OSes would simply reject binding attempt for an AF_INET6 socket
- // to an IPv4-mapped IPv6 address. Even if those that allow it, since
- // the corresponding IPv4 address is the same as the one used in the
- // AF_INET socket case above, it should at least show the same result
- // as the previous one.
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"::ffff:192.0.2.0", NULL, NULL, NULL), IOError);
-}
-
-TEST(IOServiceTest, duplicateBind_v6) {
- // In each sub test case, second attempt should fail due to duplicate bind
- IOService io_service;
-
- // IPv6, "any" address
- DNSService* dns_service = new DNSService(io_service, *TEST_SERVER_PORT, false, true, NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, false, true, NULL, NULL, NULL), IOError);
- delete dns_service;
-
-}
-
-TEST(IOServiceTest, duplicateBind_v6_address) {
- // In each sub test case, second attempt should fail due to duplicate bind
- IOService io_service;
-
- // IPv6, specific address
- DNSService* dns_service = new DNSService(io_service, *TEST_SERVER_PORT, *TEST_IPV6_ADDR, NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *TEST_IPV6_ADDR, NULL, NULL, NULL), IOError);
- delete dns_service;
-
-}
-
-TEST(IOServiceTest, duplicateBind_v4) {
- // In each sub test case, second attempt should fail due to duplicate bind
- IOService io_service;
-
- // IPv4, "any" address
- DNSService* dns_service = new DNSService(io_service, *TEST_SERVER_PORT, true, false, NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, true, false, NULL, NULL, NULL), IOError);
- delete dns_service;
-
-}
-
-TEST(IOServiceTest, duplicateBind_v4_address) {
- // In each sub test case, second attempt should fail due to duplicate bind
- IOService io_service;
-
- // IPv4, specific address
- DNSService* dns_service = new DNSService(io_service, *TEST_SERVER_PORT, *TEST_IPV4_ADDR, NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *TEST_IPV4_ADDR, NULL, NULL, NULL), IOError);
- delete dns_service;
-}
-
-// Disabled because IPv4-mapped addresses don't seem to be working with
-// the IOService constructor
-TEST(IOServiceTest, DISABLED_IPv4MappedDuplicateBind) {
- IOService io_service;
- // Duplicate bind on IPv4-mapped IPv6 address
- DNSService* dns_service = new DNSService(io_service, *TEST_SERVER_PORT, *"127.0.0.1", NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"::ffff:127.0.0.1", NULL, NULL, NULL), IOError);
- delete dns_service;
-
- // XXX:
- // Currently, this throws an "invalid argument" exception. I have
- // not been able to get IPv4-mapped addresses to work.
- dns_service = new DNSService(io_service, *TEST_SERVER_PORT, *"::ffff:127.0.0.1", NULL, NULL, NULL);
- EXPECT_THROW(DNSService(io_service, *TEST_SERVER_PORT, *"127.0.0.1", NULL, NULL, NULL), IOError);
- delete dns_service;
-}
// A simple lookup callback for DNS services. It records the pointer value of
// to given output buffer each time the callback is called (up to two times)
diff --git a/src/lib/asiodns/udp_server.cc b/src/lib/asiodns/udp_server.cc
index 0fb8bec..0f5456b 100644
--- a/src/lib/asiodns/udp_server.cc
+++ b/src/lib/asiodns/udp_server.cc
@@ -182,12 +182,6 @@ struct UDPServer::Data {
///
/// The constructor. It just creates new internal state object
/// and lets it handle the initialization.
-UDPServer::UDPServer(io_service& io_service, const ip::address& addr,
- const uint16_t port, SimpleCallback* checkin,
- DNSLookup* lookup, DNSAnswer* answer) :
- data_(new Data(io_service, addr, port, checkin, lookup, answer))
-{ }
-
UDPServer::UDPServer(io_service& io_service, int fd, int af,
SimpleCallback* checkin, DNSLookup* lookup,
DNSAnswer* answer) :
diff --git a/src/lib/asiodns/udp_server.h b/src/lib/asiodns/udp_server.h
index 2b6a574..b32c06c 100644
--- a/src/lib/asiodns/udp_server.h
+++ b/src/lib/asiodns/udp_server.h
@@ -41,19 +41,6 @@ class UDPServer : public virtual DNSServer, public virtual coroutine {
public:
/// \brief Constructor
/// \param io_service the asio::io_service to work with
- /// \param addr the IP address to listen for queries on
- /// \param port the port to listen for queries on
- /// \param checkin the callbackprovider for non-DNS events
- /// \param lookup the callbackprovider for DNS lookup events
- /// \param answer the callbackprovider for DNS answer events
- explicit UDPServer(asio::io_service& io_service,
- const asio::ip::address& addr, const uint16_t port,
- isc::asiolink::SimpleCallback* checkin = NULL,
- DNSLookup* lookup = NULL,
- DNSAnswer* answer = NULL);
-
- /// \brief Constructor
- /// \param io_service the asio::io_service to work with
/// \param fd the file descriptor of opened UDP socket
/// \param af address family, either AF_INET or AF_INET6
/// \param checkin the callbackprovider for non-DNS events
diff --git a/src/lib/bench/benchmark.h b/src/lib/bench/benchmark.h
index 7f77aa1..a5c6fd4 100644
--- a/src/lib/bench/benchmark.h
+++ b/src/lib/bench/benchmark.h
@@ -17,6 +17,7 @@
#include <sys/time.h>
+#include <cassert>
#include <iostream>
#include <ios>
@@ -210,9 +211,9 @@ public:
/// \param target The templated class object that
/// implements the code to be benchmarked.
BenchMark(const int iterations, T target) :
- iterations_(iterations), sub_iterations_(0), target_(target)
+ iterations_(iterations), sub_iterations_(0), target_(NULL)
{
- initialize(true);
+ initialize(target, true);
}
/// \brief Constructor for finer-grained control.
@@ -230,9 +231,9 @@ public:
/// \param immediate If \c true the benchmark will be performed within
/// the constructor; otherwise it only does initialization.
BenchMark(const int iterations, T& target, const bool immediate) :
- iterations_(iterations), sub_iterations_(0), target_(target)
+ iterations_(iterations), sub_iterations_(0), target_(&target)
{
- initialize(immediate);
+ initialize(target, immediate);
}
//@}
@@ -240,15 +241,17 @@ public:
///
/// This method will be called from \c run() before starting the benchmark.
/// By default it's empty, but can be customized via template
- /// specialization.
- void setUp() {}
+ /// specialization. When specialized, a reference to the target object
+ /// given to the constructor will be passed to the implementation.
+ void setUp(T&) {}
/// \brief Hook to be called after benchmark.
///
/// This method will be called from \c run() when the benchmark completes.
/// By default it's empty, but can be customized via template
- /// specialization.
- void tearDown() {}
+ /// specialization. When specialized, a reference to the target object
+ /// given to the constructor will be passed to the implementation.
+ void tearDown(T&) {}
/// \brief Perform benchmark.
///
@@ -257,17 +260,8 @@ public:
/// of times specified on construction, and records the time on completion.
/// Finally, it calls \c tearDown().
void run() {
- setUp();
-
- struct timeval beg, end;
- gettimeofday(&beg, NULL);
- for (unsigned int i = 0; i < iterations_; ++i) {
- sub_iterations_ += target_.run();
- }
- gettimeofday(&end, NULL);
- tv_diff_ = tv_subtract(end, beg);
-
- tearDown();
+ assert(target_ != NULL);
+ run(*target_);
}
/// \brief Print the benchmark result.
@@ -361,9 +355,23 @@ public:
/// performed implicitly.
static const int ITERATION_FAILURE = -1;
private:
- void initialize(const bool immediate) {
+ void run(T& target) {
+ setUp(target);
+
+ struct timeval beg, end;
+ gettimeofday(&beg, NULL);
+ for (unsigned int i = 0; i < iterations_; ++i) {
+ sub_iterations_ += target.run();
+ }
+ gettimeofday(&end, NULL);
+ tv_diff_ = tv_subtract(end, beg);
+
+ tearDown(target);
+ }
+
+ void initialize(T& target, const bool immediate) {
if (immediate) {
- run();
+ run(target);
printResult();
}
}
@@ -388,7 +396,7 @@ private:
static const int ONE_MILLION = 1000000;
const unsigned int iterations_;
unsigned int sub_iterations_;
- T& target_;
+ T* target_;
struct timeval tv_diff_;
};
diff --git a/src/lib/bench/example/search_bench.cc b/src/lib/bench/example/search_bench.cc
index 851d815..84f95d9 100644
--- a/src/lib/bench/example/search_bench.cc
+++ b/src/lib/bench/example/search_bench.cc
@@ -79,9 +79,9 @@ namespace isc {
namespace bench {
template<>
void
-BenchMark<SetSearchBenchMark>::setUp() {
+BenchMark<SetSearchBenchMark>::setUp(SetSearchBenchMark& target) {
cout << "Benchmark for searching std::set (size="
- << target_.data_.size() << ")" << endl;
+ << target.data_.size() << ")" << endl;
}
}
}
diff --git a/src/lib/bench/tests/benchmark_unittest.cc b/src/lib/bench/tests/benchmark_unittest.cc
index 9b476cd..dfe7df9 100644
--- a/src/lib/bench/tests/benchmark_unittest.cc
+++ b/src/lib/bench/tests/benchmark_unittest.cc
@@ -46,14 +46,14 @@ namespace isc {
namespace bench {
template <>
void
-BenchMark<TestBenchMark>::setUp() {
- target_.setup_completed_ = true;
+BenchMark<TestBenchMark>::setUp(TestBenchMark& target) {
+ target.setup_completed_ = true;
};
template <>
void
-BenchMark<TestBenchMark>::tearDown() {
- target_.teardown_completed_ = true;
+BenchMark<TestBenchMark>::tearDown(TestBenchMark& target) {
+ target.teardown_completed_ = true;
};
// XXX: some compilers cannot find class static constants used in
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 8370cdd..94b955a 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -14,7 +14,7 @@
$NAMESPACE isc::cc
-% CC_ASYNC_READ_FAILED asynchronous read failed
+% CC_ASYNC_READ_FAILED asynchronous read failed (error code = %1)
This marks a low level error, we tried to read data from the message queue
daemon asynchronously, but the ASIO library returned an error.
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index 77f948a..6ec243a 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -30,6 +30,10 @@
using namespace std;
+namespace {
+const char* WHITESPACE = " \b\f\n\r\t";
+} // end anonymous namespace
+
namespace isc {
namespace data {
@@ -314,15 +318,49 @@ str_from_stringstream(std::istream &in, const std::string& file, const int line,
} else {
throwJSONError("String expected", file, line, pos);
}
+
while (c != EOF && c != '"') {
- ss << c;
- if (c == '\\' && in.peek() == '"') {
- ss << in.get();
+ if (c == '\\') {
+ // see the spec for allowed escape characters
+ switch (in.peek()) {
+ case '"':
+ c = '"';
+ break;
+ case '/':
+ c = '/';
+ break;
+ case '\\':
+ c = '\\';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ default:
+ throwJSONError("Bad escape", file, line, pos);
+ }
+ // drop the escaped char
+ in.get();
++pos;
}
+ ss << c;
c = in.get();
++pos;
}
+ if (c == EOF) {
+ throwJSONError("Unterminated string", file, line, pos);
+ }
return (ss.str());
}
@@ -427,12 +465,12 @@ from_stringstream_list(std::istream &in, const std::string& file, int& line,
ElementPtr list = Element::createList();
ConstElementPtr cur_list_element;
- skip_chars(in, " \t\n", line, pos);
+ skip_chars(in, WHITESPACE, line, pos);
while (c != EOF && c != ']') {
if (in.peek() != ']') {
cur_list_element = Element::fromJSON(in, file, line, pos);
list->add(cur_list_element);
- skip_to(in, file, line, pos, ",]", " \t\n");
+ skip_to(in, file, line, pos, ",]", WHITESPACE);
}
c = in.get();
pos++;
@@ -445,7 +483,7 @@ from_stringstream_map(std::istream &in, const std::string& file, int& line,
int& pos)
{
ElementPtr map = Element::createMap();
- skip_chars(in, " \t\n", line, pos);
+ skip_chars(in, WHITESPACE, line, pos);
char c = in.peek();
if (c == EOF) {
throwJSONError(std::string("Unterminated map, <string> or } expected"), file, line, pos);
@@ -456,7 +494,7 @@ from_stringstream_map(std::istream &in, const std::string& file, int& line,
while (c != EOF && c != '}') {
std::string key = str_from_stringstream(in, file, line, pos);
- skip_to(in, file, line, pos, ":", " \t\n");
+ skip_to(in, file, line, pos, ":", WHITESPACE);
// skip the :
in.get();
pos++;
@@ -464,7 +502,7 @@ from_stringstream_map(std::istream &in, const std::string& file, int& line,
ConstElementPtr value = Element::fromJSON(in, file, line, pos);
map->set(key, value);
- skip_to(in, file, line, pos, ",}", " \t\n");
+ skip_to(in, file, line, pos, ",}", WHITESPACE);
c = in.get();
pos++;
}
@@ -543,7 +581,7 @@ Element::fromJSON(std::istream &in, const std::string& file, int& line,
char c = 0;
ElementPtr element;
bool el_read = false;
- skip_chars(in, " \n\t", line, pos);
+ skip_chars(in, WHITESPACE, line, pos);
while (c != EOF && !el_read) {
c = in.get();
pos++;
@@ -610,7 +648,14 @@ ElementPtr
Element::fromJSON(const std::string &in) {
std::stringstream ss;
ss << in;
- return (fromJSON(ss, "<string>"));
+ int line = 1, pos = 1;
+ ElementPtr result(fromJSON(ss, "<string>", line, pos));
+ skip_chars(ss, WHITESPACE, line, pos);
+ // ss must now be at end
+ if (ss.peek() != EOF) {
+ throwJSONError("Extra data", "<string>", line, pos);
+ }
+ return (result);
}
// to JSON format
@@ -642,7 +687,39 @@ NullElement::toJSON(std::ostream& ss) const {
void
StringElement::toJSON(std::ostream& ss) const {
ss << "\"";
- ss << stringValue();
+ char c;
+ const std::string& str = stringValue();
+ for (size_t i = 0; i < str.size(); ++i) {
+ c = str[i];
+ // Escape characters as defined in JSON spec
+ // Note that we do not escape forward slash; this
+ // is allowed, but not mandatory.
+ switch (c) {
+ case '"':
+ ss << '\\' << c;
+ break;
+ case '\\':
+ ss << '\\' << c;
+ break;
+ case '\b':
+ ss << '\\' << 'b';
+ break;
+ case '\f':
+ ss << '\\' << 'f';
+ break;
+ case '\n':
+ ss << '\\' << 'n';
+ break;
+ case '\r':
+ ss << '\\' << 'r';
+ break;
+ case '\t':
+ ss << '\\' << 't';
+ break;
+ default:
+ ss << c;
+ }
+ }
ss << "\"";
}
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 1b21d21..40ab86d 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -249,7 +249,7 @@ SessionImpl::internalRead(const asio::error_code& error,
}
user_handler_();
} else {
- LOG_ERROR(logger, CC_ASYNC_READ_FAILED);
+ LOG_ERROR(logger, CC_ASYNC_READ_FAILED).arg(error.value());
isc_throw(SessionError, "asynchronous read failed");
}
}
diff --git a/src/lib/cc/tests/Makefile.am b/src/lib/cc/tests/Makefile.am
index 4760855..08b7f33 100644
--- a/src/lib/cc/tests/Makefile.am
+++ b/src/lib/cc/tests/Makefile.am
@@ -24,11 +24,14 @@ run_unittests_SOURCES = data_unittests.cc session_unittests.cc run_unittests.cc
run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
-run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+# We need to put our libs first, in case gtest (or any dependency, really)
+# is installed in the same location as a different version of bind10.
+# Otherwise the linker may not use the source tree libs.
+run_unittests_LDADD = $(top_builddir)/src/lib/cc/libcc.la
run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+run_unittests_LDADD += $(GTEST_LDADD)
endif
diff --git a/src/lib/cc/tests/data_unittests.cc b/src/lib/cc/tests/data_unittests.cc
index d8624cb..87d92f6 100644
--- a/src/lib/cc/tests/data_unittests.cc
+++ b/src/lib/cc/tests/data_unittests.cc
@@ -20,6 +20,7 @@
using namespace isc::data;
+#include <sstream>
#include <iostream>
using std::oct;
#include <iomanip>
@@ -90,7 +91,7 @@ TEST(Element, from_and_to_json) {
sv.push_back("-1");
sv.push_back("-1.234");
sv.push_back("-123.456");
-
+
BOOST_FOREACH(const std::string& s, sv) {
// test << operator, which uses Element::str()
std::ostringstream stream;
@@ -122,8 +123,16 @@ TEST(Element, from_and_to_json) {
sv.push_back("{ \"a\": None}");
sv.push_back("");
sv.push_back("nul");
+ sv.push_back("hello\"foobar\"");
+ sv.push_back("\"foobar\"hello");
+ sv.push_back("[]hello");
+ sv.push_back("{}hello");
+ // String not delimited correctly
+ sv.push_back("\"hello");
+ sv.push_back("hello\"");
+
+
BOOST_FOREACH(std::string s, sv) {
-
EXPECT_THROW(el = Element::fromJSON(s), isc::data::JSONError);
}
@@ -150,6 +159,9 @@ TEST(Element, from_and_to_json) {
EXPECT_EQ("false", Element::fromJSON("FALSE")->str());
EXPECT_EQ("true", Element::fromJSON("True")->str());
EXPECT_EQ("true", Element::fromJSON("TRUE")->str());
+ EXPECT_EQ("\"\"", Element::fromJSON(" \n \t \r \f \b \"\" \n \f \t \r \b")->str());
+ EXPECT_EQ("{ }", Element::fromJSON("{ \n \r \t \b \f }")->str());
+ EXPECT_EQ("[ ]", Element::fromJSON("[ \n \r \f \t \b ]")->str());
// number overflows
EXPECT_THROW(Element::fromJSON("12345678901234567890")->str(), JSONError);
@@ -299,6 +311,43 @@ TEST(Element, create_and_value_throws) {
}
+// Helper for escape check; it puts the given string in a StringElement,
+// then checks for the following conditions:
+// stringValue() must be same as input
+// toJSON() output must be escaped
+// fromJSON() on the previous output must result in original input
+void
+escapeHelper(const std::string& input, const std::string& expected) {
+ StringElement str_element = StringElement(input);
+ EXPECT_EQ(input, str_element.stringValue());
+ std::stringstream os;
+ str_element.toJSON(os);
+ EXPECT_EQ(expected, os.str());
+ ElementPtr str_element2 = Element::fromJSON(os.str());
+ EXPECT_EQ(str_element.stringValue(), str_element2->stringValue());
+}
+
+TEST(Element, escape) {
+ // Test whether quotes are escaped correctly when creating direct
+ // String elements.
+ escapeHelper("foo\"bar", "\"foo\\\"bar\"");
+ escapeHelper("foo\\bar", "\"foo\\\\bar\"");
+ escapeHelper("foo\bbar", "\"foo\\bbar\"");
+ escapeHelper("foo\fbar", "\"foo\\fbar\"");
+ escapeHelper("foo\nbar", "\"foo\\nbar\"");
+ escapeHelper("foo\rbar", "\"foo\\rbar\"");
+ escapeHelper("foo\tbar", "\"foo\\tbar\"");
+ // Bad escapes
+ EXPECT_THROW(Element::fromJSON("\\a"), JSONError);
+ EXPECT_THROW(Element::fromJSON("\\"), JSONError);
+ // Can't have escaped quotes outside strings
+ EXPECT_THROW(Element::fromJSON("\\\"\\\""), JSONError);
+ // Inside strings is OK
+ EXPECT_NO_THROW(Element::fromJSON("\"\\\"\\\"\""));
+ // A whitespace test
+ EXPECT_NO_THROW(Element::fromJSON("\" \n \r \t \f \n \n \t\""));
+}
+
TEST(Element, ListElement) {
// this function checks the specific functions for ListElements
ElementPtr el = Element::fromJSON("[ 1, \"bar\", 3 ]");
diff --git a/src/lib/config/config_data.h b/src/lib/config/config_data.h
index 197d319..3fdbc25 100644
--- a/src/lib/config/config_data.h
+++ b/src/lib/config/config_data.h
@@ -32,7 +32,7 @@ public:
DataNotFoundError(const char* file, size_t line, const std::string& what) :
isc::Exception(file, line, what) {}
};
-
+
class ConfigData {
public:
/// Constructs a ConfigData option with no specification and an
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 1e743dd..2cdb8ea 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -31,6 +31,7 @@ libdatasrc_la_SOURCES += client.h iterator.h
libdatasrc_la_SOURCES += database.h database.cc
libdatasrc_la_SOURCES += factory.h factory.cc
nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
+libdatasrc_la_LDFLAGS = -no-undefined -version-info 1:0:1
pkglib_LTLIBRARIES = sqlite3_ds.la memory_ds.la
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 4e2fb15..fcb2971 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -27,15 +27,18 @@
#include <dns/rrset.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
+#include <dns/nsec3hash.h>
#include <datasrc/data_source.h>
#include <datasrc/logger.h>
#include <boost/foreach.hpp>
+#include <boost/scoped_ptr.hpp>
using namespace isc::dns;
using namespace std;
using namespace isc::dns::rdata;
+using namespace boost;
namespace isc {
namespace datasrc {
@@ -177,15 +180,17 @@ private:
DatabaseClient::Finder::FoundRRsets
DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
bool check_ns, const string* construct_name,
- bool any)
+ bool any,
+ DatabaseAccessor::IteratorContextPtr context)
{
RRsigStore sig_store;
bool records_found = false;
std::map<RRType, RRsetPtr> result;
- // Request the context
- DatabaseAccessor::IteratorContextPtr
- context(accessor_->getRecords(name, zone_id_));
+ // Request the context in case we didn't get one
+ if (!context) {
+ context = accessor_->getRecords(name, zone_id_);
+ }
// It must not return NULL; that would be a bug in the implementation
if (!context) {
isc_throw(isc::Unexpected, "Iterator context null at " + name);
@@ -286,13 +291,11 @@ DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
i != result.end(); ++ i) {
sig_store.appendSignatures(i->second);
}
-
if (records_found && any) {
result[RRType::ANY()] = RRsetPtr();
// These will be sitting on the other RRsets.
result.erase(RRType::RRSIG());
}
-
return (FoundRRsets(records_found, result));
}
@@ -318,6 +321,30 @@ namespace {
typedef std::set<RRType> WantedTypes;
const WantedTypes&
+NSEC3_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC3());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
+NSEC3PARAM_TYPES() {
+ static bool initialized(false);
+ static WantedTypes result;
+
+ if (!initialized) {
+ result.insert(RRType::NSEC3PARAM());
+ initialized = true;
+ }
+ return (result);
+}
+
+const WantedTypes&
NSEC_TYPES() {
static bool initialized(false);
static WantedTypes result;
@@ -355,45 +382,6 @@ FINAL_TYPES() {
}
return (result);
}
-
-}
-
-ConstRRsetPtr
-DatabaseClient::Finder::findNSECCover(const Name& name) {
- try {
- // Which one should contain the NSEC record?
- const Name coverName(findPreviousName(name));
- // Get the record and copy it out
- const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
- coverName != getOrigin());
- const FoundIterator
- nci(found.second.find(RRType::NSEC()));
- if (nci != found.second.end()) {
- return (nci->second);
- } else {
- // The previous doesn't contain NSEC.
- // Badly signed zone or a bug?
-
- // FIXME: Currently, if the zone is not signed, we could get
- // here. In that case we can't really throw, but for now, we can't
- // recognize it. So we don't throw at all, enable it once
- // we have a is_signed flag or something.
-#if 0
- isc_throw(DataSourceError, "No NSEC in " +
- coverName.toText() + ", but it was "
- "returned as previous - "
- "accessor error? Badly signed zone?");
-#endif
- }
- }
- catch (const isc::NotImplemented&) {
- // Well, they want DNSSEC, but there is no available.
- // So we don't provide anything.
- LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
- arg(accessor_->getDBName()).arg(name);
- }
- // We didn't find it, return nothing
- return (ConstRRsetPtr());
}
ZoneFinderContextPtr
@@ -416,8 +404,8 @@ DatabaseClient::Finder::find(const isc::dns::Name& name,
isc_throw(isc::Unexpected, "Use findAll to answer ANY");
}
return (ZoneFinderContextPtr(new Context(*this, options,
- findInternal(name, type,
- NULL, options))));
+ findInternal(name, type, NULL,
+ options))));
}
DatabaseClient::Finder::DelegationSearchResult
@@ -580,9 +568,9 @@ DatabaseClient::Finder::findDelegationPoint(const isc::dns::Name& name,
// If none of the above applies in any level, the search fails with NXDOMAIN.
ZoneFinder::ResultContext
DatabaseClient::Finder::findWildcardMatch(
- const isc::dns::Name& name, const isc::dns::RRType& type,
- const FindOptions options, const DelegationSearchResult& dresult,
- std::vector<isc::dns::ConstRRsetPtr>* target)
+ const Name& name, const RRType& type, const FindOptions options,
+ const DelegationSearchResult& dresult, vector<ConstRRsetPtr>* target,
+ FindDNSSECContext& dnssec_ctx)
{
// Note that during the search we are going to search not only for the
// requested type, but also for types that indicate a delegation -
@@ -625,8 +613,8 @@ DatabaseClient::Finder::findWildcardMatch(
} else if (!hasSubdomains(name.split(i - 1).toText())) {
// The wildcard match is the best one, find the final result
// at it. Note that wildcard should never be the zone origin.
- return (findOnNameResult(name, type, options, false,
- found, &wildcard, target));
+ return (findOnNameResult(name, type, options, false, found,
+ &wildcard, target, dnssec_ctx));
} else {
// more specified match found, cancel wildcard match
@@ -642,15 +630,11 @@ DatabaseClient::Finder::findWildcardMatch(
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_WILDCARD_EMPTY).
arg(accessor_->getDBName()).arg(wildcard).arg(name);
- if ((options & FIND_DNSSEC) != 0) {
- ConstRRsetPtr nsec = findNSECCover(Name(wildcard));
- if (nsec) {
- return (ResultContext(NXRRSET, nsec,
- RESULT_WILDCARD |
- RESULT_NSEC_SIGNED));
- }
- }
- return (ResultContext(NXRRSET, ConstRRsetPtr(), RESULT_WILDCARD));
+ const FindResultFlags flags = (RESULT_WILDCARD |
+ dnssec_ctx.getResultFlags());
+ return (ResultContext(NXRRSET,
+ dnssec_ctx.getDNSSECRRset(Name(wildcard),
+ true), flags));
}
}
@@ -688,6 +672,121 @@ DatabaseClient::Finder::logAndCreateResult(
return (ResultContext(code, rrset, flags));
}
+DatabaseClient::Finder::FindDNSSECContext::FindDNSSECContext(
+ DatabaseClient::Finder& finder,
+ const FindOptions options) :
+ finder_(finder),
+ need_dnssec_((options & FIND_DNSSEC) != 0),
+ is_nsec3_(false),
+ is_nsec_(false),
+ probed_(false)
+{}
+
+void
+DatabaseClient::Finder::FindDNSSECContext::probe() {
+ if (!probed_) {
+ probed_ = true;
+ if (need_dnssec_) {
+ // If an NSEC3PARAM RR exists at the zone apex, it's quite likely
+ // that the zone is signed with NSEC3. (If not, the zone is more
+ // or less broken, but it's the caller's responsibility to handle
+ // such cases).
+ const string origin = finder_.getOrigin().toText();
+ const FoundRRsets nsec3_found =
+ finder_.getRRsets(origin, NSEC3PARAM_TYPES(), false);
+ const FoundIterator nfi =
+ nsec3_found.second.find(RRType::NSEC3PARAM());
+ is_nsec3_ = (nfi != nsec3_found.second.end());
+
+ // Likewise for NSEC, depending on whether the apex has an NSEC RR.
+ // If we know the zone is NSEC3-signed, however, we don't bother
+ // to check that. This is aligned with the transition guideline
+ // described in Section 10.4 of RFC 5155.
+ if (!is_nsec3_) {
+ const FoundRRsets nsec_found =
+ finder_.getRRsets(origin, NSEC_TYPES(), false);
+ const FoundIterator nfi =
+ nsec_found.second.find(RRType::NSEC());
+ is_nsec_ = (nfi != nsec_found.second.end());
+ }
+ }
+ }
+}
+
+bool
+DatabaseClient::Finder::FindDNSSECContext::isNSEC3() {
+ if (!probed_) {
+ probe();
+ }
+ return (is_nsec3_);
+}
+
+bool
+DatabaseClient::Finder::FindDNSSECContext::isNSEC() {
+ if (!probed_) {
+ probe();
+ }
+ return (is_nsec_);
+}
+
+isc::dns::ConstRRsetPtr
+DatabaseClient::Finder::FindDNSSECContext::getDNSSECRRset(
+ const FoundRRsets& found_set)
+{
+ if (!isNSEC()) {
+ return (ConstRRsetPtr());
+ }
+
+ const FoundIterator nci = found_set.second.find(RRType::NSEC());
+ if (nci != found_set.second.end()) {
+ return (nci->second);
+ } else {
+ return (ConstRRsetPtr());
+ }
+}
+
+isc::dns::ConstRRsetPtr
+DatabaseClient::Finder::FindDNSSECContext::getDNSSECRRset(const Name &name,
+ bool covering)
+{
+ if (!isNSEC()) {
+ return (ConstRRsetPtr());
+ }
+
+ try {
+ const Name& nsec_name =
+ covering ? finder_.findPreviousName(name) : name;
+ const bool need_nscheck = (nsec_name != finder_.getOrigin());
+ const FoundRRsets found = finder_.getRRsets(nsec_name.toText(),
+ NSEC_TYPES(),
+ need_nscheck);
+ const FoundIterator nci = found.second.find(RRType::NSEC());
+ if (nci != found.second.end()) {
+ return (nci->second);
+ }
+ } catch (const isc::NotImplemented&) {
+ // This happens when the underlying database accessor doesn't support
+ // findPreviousName() (it probably doesn't support DNSSEC at all) but
+ // there is somehow an NSEC RR at the zone apex. We log the fact but
+ // otherwise let the caller decide what to do (so, for example,
+ // higher-level query processing won't completely fail but can return
+ // whatever it can get).
+ LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+ arg(finder_.accessor_->getDBName()).arg(name);
+ }
+ return (ConstRRsetPtr());
+}
+
+ZoneFinder::FindResultFlags
+DatabaseClient::Finder::FindDNSSECContext::getResultFlags() {
+ if (isNSEC3()) {
+ return (RESULT_NSEC3_SIGNED);
+ } else if (isNSEC()) {
+ return (RESULT_NSEC_SIGNED);
+ }
+ return (RESULT_DEFAULT);
+}
+
ZoneFinder::ResultContext
DatabaseClient::Finder::findOnNameResult(const Name& name,
const RRType& type,
@@ -696,28 +795,22 @@ DatabaseClient::Finder::findOnNameResult(const Name& name,
const FoundRRsets& found,
const string* wildname,
std::vector<isc::dns::ConstRRsetPtr>*
- target)
+ target, FindDNSSECContext& dnssec_ctx)
{
const bool wild = (wildname != NULL);
- FindResultFlags flags = wild ? RESULT_WILDCARD : RESULT_DEFAULT;
+ // For the wildcard case with DNSSEC required, the caller would need to
+ // know whether the zone is NSEC or NSEC3 signed. getResultFlags returns
+ // the appropriate flag based on the query context and zone status.
+ const FindResultFlags flags =
+ wild ? (RESULT_WILDCARD | dnssec_ctx.getResultFlags()) : RESULT_DEFAULT;
// Get iterators for the different types of records we are interested in -
// CNAME, NS and Wanted types.
const FoundIterator nsi(found.second.find(RRType::NS()));
const FoundIterator cni(found.second.find(RRType::CNAME()));
const FoundIterator wti(found.second.find(type));
- // For wildcard case with DNSSEC required, the caller would need to know
- // whether it's NSEC or NSEC3 signed. So we need to do an additional
- // search here, even though the NSEC RR may not be returned.
- // TODO: this part should be revised when we support NSEC3; ideally we
- // should use more effective and efficient way to identify (whether and)
- // in which way the zone is signed.
- if (wild && (options & FIND_DNSSEC) != 0 &&
- found.second.find(RRType::NSEC()) != found.second.end()) {
- flags = flags | RESULT_NSEC_SIGNED;
- }
-
- if (!is_origin && ((options & FIND_GLUE_OK) == 0) &&
+
+ if (!is_origin && (options & FIND_GLUE_OK) == 0 &&
nsi != found.second.end()) {
// A NS RRset was found at the domain we were searching for. As it is
// not at the origin of the zone, it is a delegation and indicates that
@@ -744,7 +837,6 @@ DatabaseClient::Finder::findOnNameResult(const Name& name,
wild ? DATASRC_DATABASE_WILDCARD_CNAME :
DATASRC_DATABASE_FOUND_CNAME,
flags));
-
} else if (wti != found.second.end()) {
bool any(type == RRType::ANY());
isc::log::MessageID lid(wild ? DATASRC_DATABASE_WILDCARD_MATCH :
@@ -776,32 +868,20 @@ DatabaseClient::Finder::findOnNameResult(const Name& name,
// provide the NSEC records. If it's for wildcard, we need to get the
// NSEC records in the name of the wildcard, not the substituted one,
// so we need to search the tree again.
- ConstRRsetPtr nsec_rrset; // possibly used with DNSSEC, otherwise NULL
- if ((options & FIND_DNSSEC) != 0) {
- if (wild) {
- const FoundRRsets wfound = getRRsets(*wildname, NSEC_TYPES(),
- true);
- const FoundIterator nci = wfound.second.find(RRType::NSEC());
- if (nci != wfound.second.end()) {
- nsec_rrset = nci->second;
- }
- } else {
- const FoundIterator nci = found.second.find(RRType::NSEC());
- if (nci != found.second.end()) {
- nsec_rrset = nci->second;
- }
- }
- }
- if (nsec_rrset) {
+ const ConstRRsetPtr dnssec_rrset =
+ wild ? dnssec_ctx.getDNSSECRRset(Name(*wildname), false) :
+ dnssec_ctx.getDNSSECRRset(found);
+ if (dnssec_rrset) {
// This log message covers both normal and wildcard cases, so we pass
// NULL for 'wildname'.
- return (logAndCreateResult(name, NULL, type, NXRRSET, nsec_rrset,
+ return (logAndCreateResult(name, NULL, type, NXRRSET, dnssec_rrset,
DATASRC_DATABASE_FOUND_NXRRSET_NSEC,
flags | RESULT_NSEC_SIGNED));
}
- return (logAndCreateResult(name, wildname, type, NXRRSET, nsec_rrset,
+ return (logAndCreateResult(name, wildname, type, NXRRSET, dnssec_rrset,
wild ? DATASRC_DATABASE_WILDCARD_NXRRSET :
- DATASRC_DATABASE_FOUND_NXRRSET, flags));
+ DATASRC_DATABASE_FOUND_NXRRSET,
+ flags | dnssec_ctx.getResultFlags()));
}
ZoneFinder::ResultContext
@@ -809,10 +889,8 @@ DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
FindOptions options,
const DelegationSearchResult& dresult,
std::vector<isc::dns::ConstRRsetPtr>*
- target)
+ target, FindDNSSECContext& dnssec_ctx)
{
- const bool dnssec_data = ((options & FIND_DNSSEC) != 0);
-
// On entry to this method, we know that the database doesn't have any
// entry for this name. Before returning NXDOMAIN, we need to check
// for special cases.
@@ -824,17 +902,16 @@ DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
LOG_DEBUG(logger, DBG_TRACE_DETAILED,
DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
arg(accessor_->getDBName()).arg(name);
- const ConstRRsetPtr nsec = dnssec_data ? findNSECCover(name) :
- ConstRRsetPtr();
- return (ResultContext(NXRRSET, nsec,
- nsec ? RESULT_NSEC_SIGNED : RESULT_DEFAULT));
+ return (ResultContext(NXRRSET, dnssec_ctx.getDNSSECRRset(name, true),
+ dnssec_ctx.getResultFlags()));
} else if ((options & NO_WILDCARD) == 0) {
// It's not an empty non-terminal and wildcard matching is not
// disabled, so check for wildcards. If there is a wildcard match
// (i.e. all results except NXDOMAIN) return it; otherwise fall
// through to the NXDOMAIN case below.
const ResultContext wcontext =
- findWildcardMatch(name, type, options, dresult, target);
+ findWildcardMatch(name, type, options, dresult, target,
+ dnssec_ctx);
if (wcontext.code != NXDOMAIN) {
return (wcontext);
}
@@ -844,10 +921,8 @@ DatabaseClient::Finder::findNoNameResult(const Name& name, const RRType& type,
// NSEC records if requested).
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_NO_MATCH).
arg(accessor_->getDBName()).arg(name).arg(type).arg(getClass());
- const ConstRRsetPtr nsec = dnssec_data ? findNSECCover(name) :
- ConstRRsetPtr();
- return (ResultContext(NXDOMAIN, nsec,
- nsec ? RESULT_NSEC_SIGNED : RESULT_DEFAULT));
+ return (ResultContext(NXDOMAIN, dnssec_ctx.getDNSSECRRset(name, true),
+ dnssec_ctx.getResultFlags()));
}
ZoneFinder::ResultContext
@@ -865,7 +940,7 @@ DatabaseClient::Finder::findInternal(const Name& name, const RRType& type,
name.compare(getOrigin()).getRelation();
if (reln != NameComparisonResult::SUBDOMAIN &&
reln != NameComparisonResult::EQUAL) {
- return (ResultContext(NXDOMAIN, ConstRRsetPtr()));
+ isc_throw(OutOfZone, name.toText() << " not in " << getOrigin());
}
// First, go through all superdomains from the origin down, searching for
@@ -897,23 +972,134 @@ DatabaseClient::Finder::findInternal(const Name& name, const RRType& type,
const FoundRRsets found = getRRsets(name.toText(), final_types,
!is_origin, NULL,
type == RRType::ANY());
-
+ FindDNSSECContext dnssec_ctx(*this, options);
if (found.first) {
// Something found at the domain name. Look into it further to get
// the final result.
return (findOnNameResult(name, type, options, is_origin, found, NULL,
- target));
+ target, dnssec_ctx));
} else {
// Did not find anything at all at the domain name, so check for
// subdomains or wildcards.
- return (findNoNameResult(name, type, options, dresult, target));
+ return (findNoNameResult(name, type, options, dresult, target,
+ dnssec_ctx));
}
}
+// The behaviour is inspired by the one in the in-memory implementation.
ZoneFinder::FindNSEC3Result
-DatabaseClient::Finder::findNSEC3(const Name&, bool) {
- isc_throw(NotImplemented, "findNSEC3 is not yet implemented for database "
- "data source");
+DatabaseClient::Finder::findNSEC3(const Name& name, bool recursive) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_DATABASE_FINDNSEC3).arg(name).
+ arg(recursive ? "recursive" : "non-recursive");
+
+ // First, validate the input
+ const NameComparisonResult cmp_result(name.compare(getOrigin()));
+ if (cmp_result.getRelation() != NameComparisonResult::EQUAL &&
+ cmp_result.getRelation() != NameComparisonResult::SUBDOMAIN) {
+ isc_throw(OutOfZone, "findNSEC3 attempt for out-of-zone name: " <<
+ name << ", zone: " << getOrigin() << "/" << getClass());
+ }
+
+ // Now, we need to get the NSEC3 params from the apex and create the hash
+ // creator for it.
+ const FoundRRsets nsec3param(getRRsets(getOrigin().toText(),
+ NSEC3PARAM_TYPES(), false));
+ const FoundIterator param(nsec3param.second.find(RRType::NSEC3PARAM()));
+ if (!nsec3param.first || param == nsec3param.second.end()) {
+ // No NSEC3 params? :-(
+ isc_throw(DataSourceError, "findNSEC3 attempt for non NSEC3 signed " <<
+ "zone: " << getOrigin() << "/" << getClass());
+ }
+ // This takes the RRset received from the lookup above, extracts the first RR
+ // from it, casts it to NSEC3PARAM (as it should be) and then creates
+ // the hash calculator class from it.
+ const scoped_ptr<NSEC3Hash> calculator(NSEC3Hash::create(
+ dynamic_cast<const generic::NSEC3PARAM&>(
+ param->second->getRdataIterator()->getCurrent())));
+
+ // A few shortcut variables
+ const unsigned olabels(getOrigin().getLabelCount());
+ const unsigned qlabels(name.getLabelCount());
+ const string otext(getOrigin().toText());
+
+ // This will be set to the one covering the query name
+ ConstRRsetPtr covering_proof;
+
+ // We keep stripping the leftmost label until we find something.
+ // Unless the search is recursive, we'll exit the loop in the first iteration.
+ for (unsigned labels(qlabels); labels >= olabels; -- labels) {
+ const string hash(calculator->calculate(labels == qlabels ? name :
+ name.split(qlabels - labels,
+ labels)));
+ // Get the exact match for the name.
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_DATABASE_FINDNSEC3_TRYHASH).
+ arg(name).arg(labels).arg(hash);
+
+ DatabaseAccessor::IteratorContextPtr
+ context(accessor_->getNSEC3Records(hash, zone_id_));
+
+ if (!context) {
+ isc_throw(Unexpected, "Iterator context null for hash " + hash);
+ }
+
+ const FoundRRsets nsec3(getRRsets(hash + "." + otext, NSEC3_TYPES(),
+ false, NULL, false, context));
+
+ if (nsec3.first) {
+ // We found an exact match against the current label.
+ const FoundIterator it(nsec3.second.find(RRType::NSEC3()));
+ if (it == nsec3.second.end()) {
+ isc_throw(DataSourceError, "Hash " + hash +
+ "exists, but no NSEC3 there");
+ }
+
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_FINDNSEC3_MATCH).arg(name).arg(labels).
+ arg(*it->second);
+ // Yes, we win
+ return (FindNSEC3Result(true, labels, it->second, covering_proof));
+ } else {
+ // There's no exact match. We try a previous one. We must find it
+ // (if the zone is properly signed).
+ const string prevHash(accessor_->findPreviousNSEC3Hash(zone_id_,
+ hash));
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_FINDNSEC3_TRYHASH_PREV).arg(name).
+ arg(labels).arg(prevHash);
+ context = accessor_->getNSEC3Records(prevHash, zone_id_);
+ const FoundRRsets prev_nsec3(getRRsets(prevHash + "." + otext,
+ NSEC3_TYPES(), false, NULL,
+ false, context));
+
+ if (!prev_nsec3.first) {
+ isc_throw(DataSourceError, "Hash " + prevHash + " returned "
+ "from findPreviousNSEC3Hash, but it is empty");
+ }
+ const FoundIterator
+ prev_it(prev_nsec3.second.find(RRType::NSEC3()));
+ if (prev_it == prev_nsec3.second.end()) {
+ isc_throw(DataSourceError, "The previous hash " + prevHash +
+ "exists, but does not contain the NSEC3");
+ }
+
+ covering_proof = prev_it->second;
+ // In case it is recursive, we try to get an exact match a level
+ // up. If it is not recursive, the caller is ok with a covering
+ // one, so we just return it.
+ if (!recursive) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC,
+ DATASRC_DATABASE_FINDNSEC3_COVER).arg(name).
+ arg(labels).arg(*covering_proof);
+ return (FindNSEC3Result(false, labels, covering_proof,
+ ConstRRsetPtr()));
+ }
+ }
+ }
+
+ // The zone must contain at least the apex and that one should match
+ // exactly. If that doesn't happen, we have a problem.
+ isc_throw(DataSourceError, "recursive findNSEC3 mode didn't stop, likely a "
+ "broken NSEC3 zone: " << otext << "/" << getClass());
}
Name
@@ -923,16 +1109,9 @@ DatabaseClient::Finder::findPreviousName(const Name& name) const {
try {
return (Name(str));
}
-
- // To avoid having the same code many times, we just catch all the
- // exceptions and handle them in a common code below
- catch (const isc::dns::EmptyLabel&) {}
- catch (const isc::dns::TooLongLabel&) {}
- catch (const isc::dns::BadLabelType&) {}
- catch (const isc::dns::BadEscape&) {}
- catch (const isc::dns::TooLongName&) {}
- catch (const isc::dns::IncompleteName&) {}
- isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+ catch (const isc::dns::NameParserException&) {
+ isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+ }
}
Name
@@ -1012,28 +1191,44 @@ public:
// At the end of zone
accessor_->commit();
ready_ = false;
- LOG_DEBUG(logger, DBG_TRACE_DETAILED,
- DATASRC_DATABASE_ITERATE_END);
+ LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_END);
return (ConstRRsetPtr());
}
- const string name_str(name_), rtype_str(rtype_), ttl(ttl_);
- const Name name(name_str);
- const RRType rtype(rtype_str);
- RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
- while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
- if (ttl_ != ttl) {
- if (ttl < ttl_) {
- ttl_ = ttl;
- rrset->setTTL(RRTTL(ttl));
- }
- LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
- arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
- }
- rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+ const RRType rtype(rtype_txt_);
+ RRsetPtr rrset(new RRset(Name(name_txt_), class_, rtype,
+ RRTTL(ttl_txt_)));
+ // Remember the first RDATA of the RRset for comparison:
+ const ConstRdataPtr rdata_base = rdata_;
+ while (true) {
+ // Extend the RRset with the new RDATA.
+ rrset->addRdata(rdata_);
+
+ // Retrieve the next record from the database. If we reach the
+ // end of the zone, done; if we were requested to separate all RRs,
+ // just remember this record and return the single RR.
getData();
- if (separate_rrs_) {
+ if (separate_rrs_ || !data_ready_) {
+ break;
+ }
+
+ // Check if the next record belongs to the same RRset. If not,
+ // we are done. The next RDATA has been stored in rdata_, which
+ // is used within this loop (if it belongs to the same RRset) or
+ // in the next call.
+ if (Name(name_txt_) != rrset->getName() ||
+ !isSameType(rtype, rdata_base, RRType(rtype_txt_), rdata_)) {
break;
}
+
+ // Adjust TTL if necessary
+ const RRTTL next_ttl(ttl_txt_);
+ if (next_ttl != rrset->getTTL()) {
+ if (next_ttl < rrset->getTTL()) {
+ rrset->setTTL(next_ttl);
+ }
+ LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+ arg(name_txt_).arg(class_).arg(rtype).arg(rrset->getTTL());
+ }
}
LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
arg(rrset->getName()).arg(rrset->getType());
@@ -1041,14 +1236,34 @@ public:
}
private:
+ // Check whether two RDATA types are equivalent. Basically it's a trivial
+ // comparison, but if both are of RRSIG, we should also compare the types
+ // covered.
+ static bool isSameType(RRType type1, ConstRdataPtr rdata1,
+ RRType type2, ConstRdataPtr rdata2)
+ {
+ if (type1 != type2) {
+ return (false);
+ }
+ if (type1 == RRType::RRSIG()) {
+ return (dynamic_cast<const generic::RRSIG&>(*rdata1).typeCovered()
+ == dynamic_cast<const generic::RRSIG&>(*rdata2).
+ typeCovered());
+ }
+ return (true);
+ }
+
// Load next row of data
void getData() {
string data[DatabaseAccessor::COLUMN_COUNT];
data_ready_ = context_->getNext(data);
- name_ = data[DatabaseAccessor::NAME_COLUMN];
- rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
- ttl_ = data[DatabaseAccessor::TTL_COLUMN];
- rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+ if (data_ready_) {
+ name_txt_ = data[DatabaseAccessor::NAME_COLUMN];
+ rtype_txt_ = data[DatabaseAccessor::TYPE_COLUMN];
+ ttl_txt_ = data[DatabaseAccessor::TTL_COLUMN];
+ rdata_ = rdata::createRdata(RRType(rtype_txt_), class_,
+ data[DatabaseAccessor::RDATA_COLUMN]);
+ }
}
// The dedicated accessor
@@ -1062,10 +1277,12 @@ private:
// Status
bool ready_, data_ready_;
// Data of the next row
- string name_, rtype_, rdata_, ttl_;
+ string name_txt_, rtype_txt_, ttl_txt_;
+ // RDATA of the next row
+ ConstRdataPtr rdata_;
// Whether to modify differing TTL values, or treat a different TTL as
// a different RRset
- bool separate_rrs_;
+ const bool separate_rrs_;
};
}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index d9d6dfa..40134fc 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -684,7 +684,7 @@ public:
/// This is used to find previous NSEC3 hashes, to find covering NSEC3 in
/// case none match exactly.
///
- /// In case a hash before before the lowest or the lowest is provided,
+ /// In case a hash before the lowest or the lowest is provided,
/// this should return the largest one in the zone (NSEC3 needs a
/// wrap-around semantics).
///
@@ -736,6 +736,7 @@ public:
DatabaseClient(isc::dns::RRClass rrclass,
boost::shared_ptr<DatabaseAccessor> accessor);
+
/// \brief Corresponding ZoneFinder implementation
///
/// The zone finder implementation for database data sources. Similarly
@@ -845,6 +846,7 @@ public:
boost::shared_ptr<DatabaseAccessor> accessor_;
const int zone_id_;
const isc::dns::Name origin_;
+
/// \brief Shortcut name for the result of getRRsets
typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
FoundRRsets;
@@ -887,6 +889,9 @@ public:
/// ones requested by types. It also puts a NULL pointer under the
/// ANY type into the result, if it finds any RRs at all, to ease the
/// identification of success.
+ /// \param srcContext This can be set to a non-NULL value to override the
+ /// iterator context used for obtaining the data. This can be used,
+ /// for example, to get data from the NSEC3 namespace.
/// \return A pair, where the first element indicates if the domain
/// contains any RRs at all (not only the requested, it may happen
/// this is set to true, but the second part is empty). The second
@@ -898,7 +903,122 @@ public:
FoundRRsets getRRsets(const std::string& name,
const WantedTypes& types, bool check_ns,
const std::string* construct_name = NULL,
- bool any = false);
+ bool any = false,
+ DatabaseAccessor::IteratorContextPtr srcContext =
+ DatabaseAccessor::IteratorContextPtr());
+
+ /// \brief DNSSEC related context for ZoneFinder::findInternal.
+ ///
+ /// This class is a helper for the ZoneFinder::findInternal method,
+ /// encapsulating DNSSEC related information and processing logic.
+ /// Specifically, it tells the finder whether the zone under search
+ /// is DNSSEC signed or not, and if it is, whether it's with NSEC or
+ /// with NSEC3. It also provides a RRset DNSSEC proof RRset for some
+ /// specific situations (in practice, this means an NSEC RRs for
+ /// negative proof when they are needed and expected).
+ ///
+ /// The purpose of this class is to keep the main finder implementation
+ /// unaware of DNSSEC related details. It's also intended to help
+ /// avoid unnecessary lookups for DNSSEC proof RRsets; this class
+ /// doesn't look into the DB for these RRsets unless it's known to
+ /// be needed. The same optimization could be implemented in the
+ /// main code, but it would result in duplicated, similar code logic
+ /// and make the code more complicated. By encapsulating and unifying
+ /// the logic in a single separate class, we can keep the main
+ /// search logic readable.
+ class FindDNSSECContext {
+ public:
+ /// \brief Constructor for FindDNSSECContext class.
+ ///
+ /// This constructor doesn't involve any expensive operation such
+ /// as database lookups. It only initializes some internal
+ /// states (in a cheap way) and remembers if DNSSEC proof
+ /// is requested.
+ ///
+ /// \param finder The Finder for the findInternal that uses this
+ /// context.
+ /// \param options Find options given to the finder.
+ FindDNSSECContext(Finder& finder, const FindOptions options);
+
+ /// \brief Return DNSSEC related result flags for the context.
+ ///
+ /// This method returns a FindResultFlags value related to
+ /// DNSSEC, based on the context. If DNSSEC proof is requested
+ /// and the zone is signed with NSEC/NSEC3, it returns
+ /// RESULT_NSEC_SIGNED/RESULT_NSEC3_SIGNED, respectively;
+ /// otherwise it returns RESULT_DEFAULT. So the caller can simply
+ /// take a logical OR for the returned value of this method and
+ /// whatever other flags it's going to set, without knowing
+ /// DNSSEC specific information.
+ ///
+ /// If it's not yet identified whether and how the zone is DNSSEC
+ /// signed at the time of the call, it now detects that via
+ /// database lookups (if necessary). (This is why
+ /// this method cannot be a const member function.)
+ ZoneFinder::FindResultFlags getResultFlags();
+
+ /// \brief Get DNSSEC negative proof for a given name.
+ ///
+ /// If the zone is considered NSEC-signed and the context
+ /// requested DNSSEC proofs, this method tries to find NSEC RRs
+ /// for the given name. If \c covering is true, it means a
+ /// "no name" proof is requested, so it calls findPreviousName on
+ /// the given name and extracts an NSEC record from the result;
+ /// otherwise it tries to get NSEC RRs for the given name. If
+ /// the NSEC is found, this method returns it; otherwise it returns
+ /// NULL.
+ ///
+ /// In all other cases this method simply returns NULL.
+ ///
+ /// \param name The name which the NSEC RRset belongs to.
+ /// \param covering true if a covering NSEC is required; false if
+ /// a matching NSEC is required.
+ /// \return Any found DNSSEC proof RRset or NULL
+ isc::dns::ConstRRsetPtr getDNSSECRRset(
+ const isc::dns::Name& name, bool covering);
+
+ /// \brief Get DNSSEC negative proof for a given name.
+ ///
+ /// If the zone is considered NSEC-signed and the context
+ /// requested DNSSEC proofs, this method tries to find an NSEC RRset
+ /// in the given set (\c found_set) and returns it if found;
+ /// in other cases this method simply returns NULL.
+ ///
+ /// \param found_set The RRset which may contain an NSEC RRset.
+ /// \return Any found DNSSEC proof RRset or NULL
+ isc::dns::ConstRRsetPtr getDNSSECRRset(const FoundRRsets&
+ found_set);
+
+ private:
+ /// \brief Returns whether the zone is signed with NSEC3.
+ ///
+ /// This method returns true if the zone for the finder that
+ /// uses this context is considered DNSSEC signed with NSEC3;
+ /// otherwise it returns false. If it's not yet detected,
+ /// this method now detects that via database lookups (if
+ /// necessary).
+ bool isNSEC3();
+
+ /// \brief Returns whether the zone is signed with NSEC.
+ ///
+ /// This is similar to isNSEC3(), but works for NSEC.
+ bool isNSEC();
+
+ /// \brief Probe into the database to see if/how the zone is
+ /// signed.
+ ///
+ /// This is a subroutine of isNSEC3() and isNSEC(), and performs
+ /// delayed database probe to detect whether the zone used by
+ /// the finder is DNSSEC signed, and if it is, with NSEC or NSEC3.
+ void probe();
+
+ DatabaseClient::Finder& finder_;
+ const bool need_dnssec_;
+
+ bool is_nsec3_;
+ bool is_nsec_;
+ bool probed_;
+ };
/// \brief Search result of \c findDelegationPoint().
///
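A minimal sketch of the intended call pattern for this class (illustrative only; it simply mirrors how findNoNameResult()/findWildcardMatch() in database.cc above use it, inside a Finder method):

    FindDNSSECContext dnssec_ctx(*this, options);   // cheap, no DB access yet
    // The flags can simply be OR-ed in; RESULT_DEFAULT comes back when DNSSEC
    // wasn't requested or the zone isn't signed.
    const FindResultFlags flags = RESULT_WILDCARD | dnssec_ctx.getResultFlags();
    // 'true' requests a covering NSEC (a "no name" proof); NULL if unavailable.
    const isc::dns::ConstRRsetPtr proof = dnssec_ctx.getDNSSECRRset(name, true);
    return (ResultContext(NXRRSET, proof, flags));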
@@ -1002,7 +1122,8 @@ public:
/// \param target If the type happens to be ANY, it will insert all
/// the RRsets of the found name (if any is found) here instead
/// of being returned by the result.
- ///
+ /// \param dnssec_ctx The DNSSEC context; a helper that encapsulates
+ /// DNSSEC processing for the find functions.
+ ///
/// \return Tuple holding the result of the search - the RRset of the
/// wildcard records matching the name, together with a status
/// indicating the match type (e.g. CNAME at the wildcard
@@ -1010,12 +1131,12 @@ public:
/// success due to an exact match). Also returned if there
/// is no match is an indication as to whether there was an
/// NXDOMAIN or an NXRRSET.
- ResultContext findWildcardMatch(
- const isc::dns::Name& name,
- const isc::dns::RRType& type,
- const FindOptions options,
- const DelegationSearchResult& dresult,
- std::vector<isc::dns::ConstRRsetPtr>* target);
+ ResultContext findWildcardMatch(const isc::dns::Name& name,
+ const isc::dns::RRType& type,
+ const FindOptions options,
+ const DelegationSearchResult& dresult,
+ std::vector<isc::dns::ConstRRsetPtr>*
+ target, FindDNSSECContext& dnssec_ctx);
/// \brief Handle matching results for name
///
@@ -1048,7 +1169,9 @@ public:
/// it's NULL in the case of non wildcard match.
/// \param target When the query is any, this must be set to a vector
/// where the result will be stored.
- ///
+ /// \param dnssec_ctx The DNSSEC context; a helper that encapsulates
+ /// DNSSEC processing for the find functions.
+ ///
/// \return Tuple holding the result of the search - the RRset of the
/// wildcard records matching the name, together with a status
/// indicating the match type (corresponding to the each of
@@ -1062,7 +1185,7 @@ public:
const FoundRRsets& found,
const std::string* wildname,
std::vector<isc::dns::ConstRRsetPtr>*
- target);
+ target, FindDNSSECContext& dnssec_ctx);
/// \brief Handle no match for name
///
@@ -1087,7 +1210,8 @@ public:
/// \param target If the query is for type ANY, the successful result,
/// if there happens to be one, will be returned through the
/// parameter, as it doesn't fit into the result.
- ///
+ /// \param dnssec_ctx The DNSSEC context; a helper that encapsulates
+ /// DNSSEC processing for the find functions.
+ ///
/// \return Tuple holding the result of the search - the RRset of the
/// wildcard records matching the name, together with a status
/// indicating the match type (e.g. CNAME at the wildcard
@@ -1098,7 +1222,7 @@ public:
FindOptions options,
const DelegationSearchResult& dresult,
std::vector<isc::dns::ConstRRsetPtr>*
- target);
+ target, FindDNSSECContext& dnssec_ctx);
/// Logs condition and creates result
///
@@ -1139,13 +1263,6 @@ public:
/// \return true if the name has subdomains, false if not.
bool hasSubdomains(const std::string& name);
- /// \brief Get the NSEC covering a name.
- ///
- /// This one calls findPreviousName on the given name and extracts an
- /// NSEC record on the result. It handles various error cases. The
- /// method exists to share code present at more than one location.
- dns::ConstRRsetPtr findNSECCover(const dns::Name& name);
-
/// \brief Convenience type shortcut.
///
/// To find stuff in the result of getRRsets.
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index f1baee9..a9870d6 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -75,6 +75,35 @@ The datasource tried to provide an NSEC proof that the named domain does not
exist, but the database backend doesn't support DNSSEC. No proof is included
in the answer as a result.
+% DATASRC_DATABASE_FINDNSEC3 Looking for NSEC3 for %1 in %2 mode
+Debug information. A search in a database data source for an NSEC3 that
+matches or covers the given name is being started.
+
+% DATASRC_DATABASE_FINDNSEC3_COVER found a covering NSEC3 for %1: %2
+Debug information. An NSEC3 that covers the given name is found and
+being returned. The found NSEC3 RRset is also displayed.
+
+% DATASRC_DATABASE_FINDNSEC3_MATCH found a matching NSEC3 for %1 at label count %2: %3
+Debug information. An NSEC3 that matches (possibly a superdomain of)
+the given name is found and being returned. When the shown label
+count is smaller than that of the given name, the matching NSEC3 is
+for a superdomain of the given name (see DATASRC_DATABASE_FINDNSEC3_TRYHASH).
+The found NSEC3 RRset is also displayed.
+
+% DATASRC_DATABASE_FINDNSEC3_TRYHASH looking for NSEC3 for %1 at label count %2 (hash %3)
+Debug information. In an attempt to find an NSEC3 for the given name,
+(possibly a superdomain of) the name is hashed and searched for in the
+NSEC3 name space. When the shown label count is smaller than that of the
+given name, the search tries the superdomain name that shares the shown
+number of rightmost (higher-level) labels of the given name (e.g., for
+www.example.com. with a shown label count of 3, example.com. is being
+tried, as "." is 1 label long).
+
+% DATASRC_DATABASE_FINDNSEC3_TRYHASH_PREV looking for previous NSEC3 for %1 at label count %2 (hash %3)
+Debug information. An exact match on hash (see
+DATASRC_DATABASE_FINDNSEC3_TRYHASH) was unsuccessful. The hash preceding
+that one is retrieved instead.
+
% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3/%4
Debug information. The database data source is looking up records with the given
name and type in the database.
@@ -145,10 +174,12 @@ While iterating through the zone, the program extracted next RRset from it.
The name and RRtype of the RRset is indicated in the message.
% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
-While iterating through the zone, the time to live for RRs of the given RRset
-were found to be different. This isn't allowed on the wire and is considered
-an error, so we set it to the lowest value we found (but we don't modify the
-database). The data in database should be checked and fixed.
+While iterating through the zone, the time to live values for RRs of the
+given RRset were found to differ. Since an RRset cannot have
+multiple TTLs, we set it to the lowest value we found (but we don't
+modify the database). This is what a client would do when such RRs
+were given in a DNS response, according to RFC 2181. The data in the
+database should be checked and fixed.
% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
This is a debug message indicating that the program (successfully)
@@ -634,6 +665,17 @@ enough information for it. The code is 1 for error, 2 for not implemented.
% DATASRC_SQLITE_CLOSE closing SQLite database
Debug information. The SQLite data source is closing the database file.
+% DATASRC_SQLITE_COMPATIBLE_VERSION database schema V%1.%2 not up to date (expecting V%3.%4) but is compatible
+The version of the SQLite3 database schema used to hold the zone data
+is not the latest one - the current version of BIND 10 was written
+with a later schema version in mind. However, the database is
+compatible with the current version of BIND 10, and BIND 10 will run
+without any problems.
+
+Consult the release notes for your version of BIND 10. Depending on
+the changes made to the database schema, it is possible that improved
+performance could result if the database were upgraded.
+
% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
The database file is no longer needed and is being closed.
@@ -701,6 +743,14 @@ source.
The SQLite data source was asked to provide a NSEC3 record for given zone.
But it doesn't contain that zone.
+% DATASRC_SQLITE_INCOMPATIBLE_VERSION database schema V%1.%2 incompatible with version (V%3.%4) expected
+The version of the SQLite3 database schema used to hold the zone data
+is incompatible with the version expected by BIND 10. As a result,
+BIND 10 is unable to run using the database file as the data source.
+
+The database should be updated using the means described in the BIND
+10 documentation.
+
% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
A wrapper object to hold database connection is being initialized.
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
index 9d0a762..f3ca397 100644
--- a/src/lib/datasrc/factory.h
+++ b/src/lib/datasrc/factory.h
@@ -163,7 +163,7 @@ public:
///
/// \return Reference to the DataSourceClient instance contained in this
/// container.
- DataSourceClient& getInstance() { return *instance_; }
+ DataSourceClient& getInstance() { return (*instance_); }
private:
DataSourceClient* instance_;
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index a3a2110..c19d5ae 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -29,6 +29,7 @@
#include <datasrc/data_source.h>
#include <datasrc/factory.h>
+#include <boost/function.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/bind.hpp>
@@ -53,6 +54,9 @@ using namespace internal;
namespace {
// Some type aliases
+// A functor type used for loading.
+typedef boost::function<void(ConstRRsetPtr)> LoadCallback;
+
// RRset specified for this implementation
typedef boost::shared_ptr<internal::RBNodeRRset> RBNodeRRsetPtr;
typedef boost::shared_ptr<const internal::RBNodeRRset> ConstRBNodeRRsetPtr;
@@ -362,8 +366,7 @@ ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const {
if (result == DomainTree::EXACTMATCH) {
return (ResultType(ZoneFinder::SUCCESS, node, state.rrset_,
zonecut_flag));
- }
- if (result == DomainTree::PARTIALMATCH) {
+ } else if (result == DomainTree::PARTIALMATCH) {
assert(node != NULL);
if (state.dname_node_ != NULL) { // DNAME
LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_DNAME_FOUND).
@@ -408,10 +411,15 @@ ZoneData::findNode(const Name& name, ZoneFinder::FindOptions options) const {
FindNodeResult::FIND_WILDCARD |
zonecut_flag));
}
+ // Nothing really matched.
+ LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOT_FOUND).arg(name);
+ return (ResultType(ZoneFinder::NXDOMAIN, node, state.rrset_));
+ } else {
+ // If the name is neither an exact nor a partial match, it is
+ // out of bailiwick, which is considered an error.
+ isc_throw(OutOfZone, name.toText() << " not in " <<
+ origin_data_->getName());
}
- // Nothing really matched. The name may even be out-of-bailiwick.
- LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_NOT_FOUND).arg(name);
- return (ResultType(ZoneFinder::NXDOMAIN, node, state.rrset_));
}
} // unnamed namespace
@@ -757,6 +765,17 @@ struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
// The actual zone data
scoped_ptr<ZoneData> zone_data_;
+ // Common process for zone load.
+ // rrset_installer is a functor that takes another functor as an argument,
+ // and is expected to call the latter for each RRset of the zone. How the
+ // sequence of the RRsets is generated depends on the internal
+ // details of the loader: either from a textual master file or from
+ // another data source.
+ // filename is the file name of the master file or empty if the zone is
+ // loaded from another data source.
+ void load(const string& filename,
+ boost::function<void(LoadCallback)> rrset_installer);
+
// Add the necessary magic for any wildcard contained in 'name'
// (including itself) to be found in the zone.
//
@@ -1346,7 +1365,7 @@ InMemoryZoneFinder::findNSEC3(const Name& name, bool recursive) {
const NameComparisonResult cmp_result = name.compare(impl_->origin_);
if (cmp_result.getRelation() != NameComparisonResult::EQUAL &&
cmp_result.getRelation() != NameComparisonResult::SUBDOMAIN) {
- isc_throw(InvalidParameter, "findNSEC3 attempt for out-of-zone name: "
+ isc_throw(OutOfZone, "findNSEC3 attempt for out-of-zone name: "
<< name << ", zone: " << impl_->origin_ << "/"
<< impl_->zone_class_);
}
@@ -1451,6 +1470,13 @@ addAdditional(RBNodeRRset* rrset, ZoneData* zone_data,
const Name& name = getAdditionalName(rrset->getType(),
rdata_iterator->getCurrent());
+ // if the name is not in or below this zone, skip it
+ const NameComparisonResult::NameRelation reln =
+ name.compare(zone_data->origin_data_->getName()).getRelation();
+ if (reln != NameComparisonResult::SUBDOMAIN &&
+ reln != NameComparisonResult::EQUAL) {
+ continue;
+ }
const ZoneData::FindMutableNodeResult result =
zone_data->findNode<ZoneData::FindMutableNodeResult>(
name, ZoneFinder::FIND_GLUE_OK);
@@ -1540,24 +1566,16 @@ addWildAdditional(RBNodeRRset* rrset, ZoneData* zone_data) {
}
void
-InMemoryZoneFinder::load(const string& filename) {
- LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
- arg(filename);
- // Load it into temporary zone data. As we build the zone, we record
- // the (RBNode)RRsets that needs to be associated with additional
- // information in 'need_additionals'.
+InMemoryZoneFinder::InMemoryZoneFinderImpl::load(
+ const string& filename,
+ boost::function<void(LoadCallback)> rrset_installer)
+{
vector<RBNodeRRset*> need_additionals;
- scoped_ptr<ZoneData> tmp(new ZoneData(getOrigin()));
+ scoped_ptr<ZoneData> tmp(new ZoneData(origin_));
- masterLoad(filename.c_str(), getOrigin(), getClass(),
- boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_,
- _1, tmp.get(), &need_additionals));
+ rrset_installer(boost::bind(&InMemoryZoneFinderImpl::addFromLoad, this,
+ _1, tmp.get(), &need_additionals));
- // For each RRset in need_additionals, identify the corresponding
- // RBnode for additional processing and associate it in the RRset.
- // If some additional names in an RRset RDATA as additional need wildcard
- // expansion, we'll remember them in a separate vector, and handle them
- // with addWildAdditional.
vector<RBNodeRRset*> wild_additionals;
for_each(need_additionals.begin(), need_additionals.end(),
boost::bind(addAdditional, _1, tmp.get(), &wild_additionals));
@@ -1573,16 +1591,77 @@ InMemoryZoneFinder::load(const string& filename) {
if (tmp->origin_data_->getData()->find(RRType::NSEC3PARAM()) ==
tmp->origin_data_->getData()->end()) {
LOG_WARN(logger, DATASRC_MEM_NO_NSEC3PARAM).
- arg(getOrigin()).arg(getClass());
+ arg(origin_).arg(zone_class_);
}
}
// If it went well, put it inside
- impl_->file_name_ = filename;
- tmp.swap(impl_->zone_data_);
+ file_name_ = filename;
+ tmp.swap(zone_data_);
// And let the old data die with tmp
}
+namespace {
+// A wrapper for dns::masterLoad used by load() below. Essentially it
+// converts the two callback types. Note the mostly redundant wrapper of
+// boost::bind. It converts function<void(ConstRRsetPtr)> to
+// function<void(RRsetPtr)> (masterLoad() expects the latter). SunStudio
+// doesn't seem to do this conversion if we just pass 'callback'.
+void
+masterLoadWrapper(const char* const filename, const Name& origin,
+ const RRClass& zone_class, LoadCallback callback)
+{
+ masterLoad(filename, origin, zone_class, boost::bind(callback, _1));
+}
+
+// The installer called from Impl::load() for the iterator version of load().
+void
+generateRRsetFromIterator(ZoneIterator* iterator, LoadCallback callback) {
+ ConstRRsetPtr rrset;
+ vector<ConstRRsetPtr> rrsigs; // placeholder for RRSIGs until "commitable".
+
+ // The current internal implementation assumes an RRSIG is always added
+ // after the RRset it covers. So we store any RRSIGs in 'rrsigs' until
+ // it's safe to add them; based on our assumption, if the owner name
+ // changes, all covered RRsets of the previous name should have been
+ // installed and any pending RRSIGs can be added at that point. RRSIGs
+ // of the last name from the iterator must be added separately.
+ while ((rrset = iterator->getNextRRset()) != NULL) {
+ if (!rrsigs.empty() && rrset->getName() != rrsigs[0]->getName()) {
+ BOOST_FOREACH(ConstRRsetPtr sig_rrset, rrsigs) {
+ callback(sig_rrset);
+ }
+ rrsigs.clear();
+ }
+ if (rrset->getType() == RRType::RRSIG()) {
+ rrsigs.push_back(rrset);
+ } else {
+ callback(rrset);
+ }
+ }
+
+ BOOST_FOREACH(ConstRRsetPtr sig_rrset, rrsigs) {
+ callback(sig_rrset);
+ }
+}
+}
+
+void
+InMemoryZoneFinder::load(const string& filename) {
+ LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
+ arg(filename);
+
+ impl_->load(filename,
+ boost::bind(masterLoadWrapper, filename.c_str(), getOrigin(),
+ getClass(), _1));
+}
+
+void
+InMemoryZoneFinder::load(ZoneIterator& iterator) {
+ impl_->load(string(),
+ boost::bind(generateRRsetFromIterator, &iterator, _1));
+}
+
void
InMemoryZoneFinder::swap(InMemoryZoneFinder& zone_finder) {
LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_SWAP).arg(getOrigin()).
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index fbeb2c3..c687d1b 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -125,16 +125,6 @@ public:
/// exists).
result::Result add(const isc::dns::ConstRRsetPtr& rrset);
- /// \brief RRSet out of zone exception.
- ///
- /// This is thrown if addition of an RRset that doesn't belong under the
- /// zone's origin is requested.
- struct OutOfZone : public InvalidParameter {
- OutOfZone(const char* file, size_t line, const char* what) :
- InvalidParameter(file, line, what)
- { }
- };
-
/// \brief RRset is NULL exception.
///
/// This is thrown if the provided RRset parameter is NULL.
@@ -198,6 +188,26 @@ public:
/// configuration reloading is written.
void load(const std::string& filename);
+ /// \brief Load zone from another data source.
+ ///
+ /// This is similar to the other version, but zone's RRsets are provided
+ /// by an iterator of another data source. On successful load, the
+ /// internal filename will be cleared.
+ ///
+ /// This implementation assumes the iterator produces combined RRsets,
+ /// that is, there should be exactly one RRset for the same owner name and
+ /// RR type. This means the caller is expected to create the iterator
+ /// with \c separate_rrs being \c false. This implementation also assumes
+ /// RRsets of different names are not mixed; so if the iterator produces
+ /// an RRset of a different name than that of the previous RRset, that
+ /// previous name must never appear in the subsequent sequence of RRsets.
+ /// Note that the iterator API does not ensure this. If the underlying
+ /// implementation does not follow it, load() will fail. Note, however,
+ /// that this whole interface is tentative; in-memory zone loading will
+ /// have to be revisited fundamentally, and at that point this restriction
+ /// probably won't matter.
+ void load(ZoneIterator& iterator);
+
/// Exchanges the content of \c this zone finder with that of the given
/// \c zone_finder.
///
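A minimal caller sketch for the new iterator-based load() (hypothetical code, not part of this patch; 'src_client' stands for any DataSourceClient serving the same zone):

    // Ask the source data source for a combined-RRset iterator
    // (separate_rrs is left at its default of false).
    isc::datasrc::ZoneIteratorPtr it =
        src_client->getIterator(isc::dns::Name("example.org"));
    isc::datasrc::InMemoryZoneFinder finder(isc::dns::RRClass::IN(),
                                            isc::dns::Name("example.org"));
    finder.load(*it);   // on success the internal file name is cleared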
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 308df60..11f364e 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -15,8 +15,11 @@
#include <sqlite3.h>
#include <string>
+#include <utility>
#include <vector>
+#include <exceptions/exceptions.h>
+
#include <datasrc/sqlite3_accessor.h>
#include <datasrc/logger.h>
#include <datasrc/data_source.h>
@@ -27,7 +30,20 @@
using namespace std;
using namespace isc::data;
-#define SQLITE_SCHEMA_VERSION 1
+namespace {
+// Expected schema. The major version must match, otherwise there is an error. If
+// the minor version of the database is less than this, a warning is output.
+//
+// It is assumed that a program written to run on m.n of the database will run
+// with a database version m.p, where p is any number. However, if p < n,
+// we assume that the database structure was upgraded for some reason, and that
+// some advantage may result if the database is upgraded. Conversely, if p > n,
+// the database is at a later version than the program was written for, and the
+// program may not be taking advantage of features (possibly performance
+// improvements) added to the database.
+const int SQLITE_SCHEMA_MAJOR_VERSION = 2;
+const int SQLITE_SCHEMA_MINOR_VERSION = 0;
+}
namespace isc {
namespace datasrc {
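A sketch of the compatibility rule described in the comment above (illustrative only; the actual check is the checkAndSetupSchema() change further down in this diff):

    // db_major/db_minor are the values read from the schema_version table.
    if (db_major != SQLITE_SCHEMA_MAJOR_VERSION) {
        // Incompatible schema: refuse to use the database (throws).
    } else if (db_minor < SQLITE_SCHEMA_MINOR_VERSION) {
        // Older but compatible schema: run, but warn that upgrading the
        // database may bring some advantage.
    }   // equal or newer minor version: run as-is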
@@ -125,8 +141,8 @@ const char* const text_statements[NUM_STATEMENTS] = {
struct SQLite3Parameters {
SQLite3Parameters() :
- db_(NULL), version_(-1), in_transaction(false), updating_zone(false),
- updated_zone_id(-1)
+ db_(NULL), major_version_(-1), minor_version_(-1),
+ in_transaction(false), updating_zone(false), updated_zone_id(-1)
{
for (int i = 0; i < NUM_STATEMENTS; ++i) {
statements_[i] = NULL;
@@ -164,7 +180,8 @@ struct SQLite3Parameters {
}
sqlite3* db_;
- int version_;
+ int major_version_;
+ int minor_version_;
bool in_transaction; // whether or not a transaction has been started
bool updating_zone; // whether or not updating the zone
int updated_zone_id; // valid only when in_transaction is true
@@ -255,34 +272,42 @@ public:
};
const char* const SCHEMA_LIST[] = {
- "CREATE TABLE schema_version (version INTEGER NOT NULL)",
- "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE schema_version (version INTEGER NOT NULL, "
+ "minor INTEGER NOT NULL DEFAULT 0)",
+ "INSERT INTO schema_version VALUES (2, 0)",
"CREATE TABLE zones (id INTEGER PRIMARY KEY, "
- "name STRING NOT NULL COLLATE NOCASE, "
- "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "name TEXT NOT NULL COLLATE NOCASE, "
+ "rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN', "
"dnssec BOOLEAN NOT NULL DEFAULT 0)",
"CREATE INDEX zones_byname ON zones (name)",
"CREATE TABLE records (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, name TEXT NOT NULL COLLATE NOCASE, "
+ "rname TEXT NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype TEXT NOT NULL COLLATE NOCASE, sigtype TEXT COLLATE NOCASE, "
+ "rdata TEXT NOT NULL)",
"CREATE INDEX records_byname ON records (name)",
"CREATE INDEX records_byrname ON records (rname)",
+ // The next index is a tricky one. It's necessary for
+ // FIND_PREVIOUS to use the index efficiently; since there's an
+ // "inequality", the rname column must be placed later. records_byrname
+ // may not be sufficient, especially when the zone is not signed (and
+ // defining a separate index for rdtype only doesn't work either; SQLite3
+ // would then create a temporary B-tree for "ORDER BY").
+ "CREATE INDEX records_bytype_and_rname ON records (rdtype, rname)",
"CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
- "hash STRING NOT NULL COLLATE NOCASE, "
- "owner STRING NOT NULL COLLATE NOCASE, "
- "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "hash TEXT NOT NULL COLLATE NOCASE, "
+ "owner TEXT NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype TEXT NOT NULL COLLATE NOCASE, "
+ "rdata TEXT NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
"CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
"zone_id INTEGER NOT NULL, "
"version INTEGER NOT NULL, "
"operation INTEGER NOT NULL, "
- "name STRING NOT NULL COLLATE NOCASE, "
- "rrtype STRING NOT NULL COLLATE NOCASE, "
+ "name TEXT NOT NULL COLLATE NOCASE, "
+ "rrtype TEXT NOT NULL COLLATE NOCASE, "
"ttl INTEGER NOT NULL, "
- "rdata STRING NOT NULL)",
+ "rdata TEXT NOT NULL)",
NULL
};
@@ -308,14 +333,13 @@ void doSleep() {
// returns the schema version if the schema version table exists
// returns -1 if it does not
-int checkSchemaVersion(sqlite3* db) {
+int checkSchemaVersionElement(sqlite3* db, const char* const query) {
sqlite3_stmt* prepared = NULL;
// At this point in time, the database might be exclusively locked, in
// which case even prepare() will return BUSY, so we may need to try a
// few times
for (size_t i = 0; i < 50; ++i) {
- int rc = sqlite3_prepare_v2(db, "SELECT version FROM schema_version",
- -1, &prepared, NULL);
+ int rc = sqlite3_prepare_v2(db, query, -1, &prepared, NULL);
if (rc == SQLITE_ERROR) {
// this is the error that is returned when the table does not
// exist
@@ -337,50 +361,116 @@ int checkSchemaVersion(sqlite3* db) {
return (version);
}
+// Returns the schema major and minor version numbers in a pair.
+// Returns (-1, -1) if the table does not exist, (1, 0) for a V1
+// database, and (n, m) for any other.
+pair<int, int> checkSchemaVersion(sqlite3* db) {
+ int major = checkSchemaVersionElement(db,
+ "SELECT version FROM schema_version");
+ if (major == -1) {
+ return (make_pair(-1, -1));
+ } else if (major == 1) {
+ return (make_pair(1, 0));
+ } else {
+ int minor = checkSchemaVersionElement(db,
+ "SELECT minor FROM schema_version");
+ return (make_pair(major, minor));
+ }
+}
+
+// A helper class used in createDatabase() below so that we can manage the one-shot
+// transaction safely.
+class ScopedTransaction {
+public:
+ ScopedTransaction(sqlite3* db) : db_(NULL) {
+ // try for 5 secs (50*0.1)
+ for (size_t i = 0; i < 50; ++i) {
+ const int rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION",
+ NULL, NULL, NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ doSleep();
+ }
+ // Hold the DB pointer once we have successfully acquired the lock.
+ db_ = db;
+ }
+ ~ScopedTransaction() {
+ if (db_ != NULL) {
+ // Note: even rollback could fail in theory, but in that case
+ // we cannot do much for safe recovery anyway. We could at least
+ // log the event, but for now don't even bother to do that, with
+ // the expectation that we'll soon stop creating the schema in this
+ // module.
+ sqlite3_exec(db_, "ROLLBACK", NULL, NULL, NULL);
+ }
+ }
+ void commit() {
+ if (sqlite3_exec(db_, "COMMIT TRANSACTION", NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(SQLite3Error, "Unable to commit newly created database "
+ "schema: " << sqlite3_errmsg(db_));
+ }
+ db_ = NULL;
+ }
+
+private:
+ sqlite3* db_;
+};
+
// return db version
-int create_database(sqlite3* db) {
+pair<int, int>
+createDatabase(sqlite3* db) {
+ logger.info(DATASRC_SQLITE_SETUP);
+
// try to get an exclusive lock. Once that is obtained, do the version
// check *again*, just in case this process was racing another
- //
- // try for 5 secs (50*0.1)
- int rc;
- logger.info(DATASRC_SQLITE_SETUP);
- for (size_t i = 0; i < 50; ++i) {
- rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
- NULL);
- if (rc == SQLITE_OK) {
- break;
- } else if (rc != SQLITE_BUSY || i == 50) {
- isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
- "for database creation: " << sqlite3_errmsg(db));
- }
- doSleep();
- }
- int schema_version = checkSchemaVersion(db);
- if (schema_version == -1) {
+ ScopedTransaction transaction(db);
+ pair<int, int> schema_version = checkSchemaVersion(db);
+ if (schema_version.first == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
isc_throw(SQLite3Error,
- "Failed to set up schema " << SCHEMA_LIST[i]);
+ "Failed to set up schema " << SCHEMA_LIST[i]);
}
}
- sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
- return (SQLITE_SCHEMA_VERSION);
- } else {
- return (schema_version);
+ transaction.commit();
+
+ // Return the version. We query again to ensure that the only point
+ // in which the current schema version is defined is in the create
+ // statements.
+ schema_version = checkSchemaVersion(db);
}
+
+ return (schema_version);
}
void
checkAndSetupSchema(Initializer* initializer) {
sqlite3* const db = initializer->params_.db_;
- int schema_version = checkSchemaVersion(db);
- if (schema_version != SQLITE_SCHEMA_VERSION) {
- schema_version = create_database(db);
- }
- initializer->params_.version_ = schema_version;
+ pair<int, int> schema_version = checkSchemaVersion(db);
+ if (schema_version.first == -1) {
+ schema_version = createDatabase(db);
+ } else if (schema_version.first != SQLITE_SCHEMA_MAJOR_VERSION) {
+ LOG_ERROR(logger, DATASRC_SQLITE_INCOMPATIBLE_VERSION)
+ .arg(schema_version.first).arg(schema_version.second)
+ .arg(SQLITE_SCHEMA_MAJOR_VERSION).arg(SQLITE_SCHEMA_MINOR_VERSION);
+ isc_throw(IncompatibleDbVersion,
+ "incompatible SQLite3 database version: " <<
+ schema_version.first << "." << schema_version.second);
+ } else if (schema_version.second < SQLITE_SCHEMA_MINOR_VERSION) {
+ LOG_WARN(logger, DATASRC_SQLITE_COMPATIBLE_VERSION)
+ .arg(schema_version.first).arg(schema_version.second)
+ .arg(SQLITE_SCHEMA_MAJOR_VERSION).arg(SQLITE_SCHEMA_MINOR_VERSION);
+ }
+
+ initializer->params_.major_version_ = schema_version.first;
+ initializer->params_.minor_version_ = schema_version.second;
}
}
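Illustrative note (not part of the patch): the ScopedTransaction guard above is the usual RAII pattern, and createDatabase() uses it exactly as in the following sketch (the function name here is hypothetical):

    void recreateSchemaExample(sqlite3* db) {
        ScopedTransaction guard(db);   // BEGIN EXCLUSIVE, retried on SQLITE_BUSY
        // ... execute the schema statements; if anything throws before
        // commit(), the destructor issues ROLLBACK automatically ...
        guard.commit();                // COMMIT; the destructor then does nothing
    }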
@@ -858,7 +948,7 @@ private:
// No data returned but the SQL query succeeded. Only possibility
// is that there is no entry in the differences table for the given
// zone and version.
- isc_throw(NoSuchSerial, "No entry in differences table for " <<
+ isc_throw(NoSuchSerial, "No entry in differences table for" <<
" zone ID " << zone_id << ", serial number " << serial);
}
@@ -1085,26 +1175,6 @@ SQLite3Accessor::addRecordDiff(int zone_id, uint32_t serial,
executer.exec();
}
-vector<vector<string> >
-SQLite3Accessor::getRecordDiff(int zone_id) {
- sqlite3_stmt* const stmt = dbparameters_->getStatement(GET_RECORD_DIFF);
- sqlite3_bind_int(stmt, 1, zone_id);
-
- vector<vector<string> > result;
- while (sqlite3_step(stmt) == SQLITE_ROW) {
- vector<string> row_result;
- for (int i = 0; i < 6; ++i) {
- row_result.push_back(convertToPlainChar(sqlite3_column_text(stmt,
- i),
- dbparameters_->db_));
- }
- result.push_back(row_result);
- }
- sqlite3_reset(stmt);
-
- return (result);
-}
-
std::string
SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
const
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index efaec0e..9f3b60e 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -47,6 +47,12 @@ public:
DataSourceError(file, line, what) {}
};
+class IncompatibleDbVersion : public Exception {
+public:
+ IncompatibleDbVersion(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
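Illustrative note (not part of the patch): a hypothetical caller would observe the new exception roughly as follows; only the SQLite3Accessor constructor and the exception type come from this change, the rest is a sketch:

    try {
        boost::shared_ptr<DatabaseAccessor> accessor(
            new SQLite3Accessor("/path/to/zone.sqlite3", "IN"));
    } catch (const IncompatibleDbVersion&) {
        // The file's major schema version differs from the one this code
        // supports; the database must be converted before it can be used.
    }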
+
/**
* \brief Too Much Data
*
@@ -218,16 +224,6 @@ public:
int zone_id, uint32_t serial, DiffOperation operation,
const std::string (&params)[DIFF_PARAM_COUNT]);
- // A short term method for tests until we implement more complete
- // API to retrieve diffs (#1330). It returns all records of the diffs
- // table whose zone_id column is identical to the given value.
- // Since this is a short term workaround, it ignores some corner cases
- // (such as an SQLite3 execution failure) and is not very efficient,
- // in favor of brevity. Once #1330 is completed, this method must be
- // removed, and the tests using this method must be rewritten using the
- // official API.
- std::vector<std::vector<std::string> > getRecordDiff(int zone_id);
-
/// The SQLite3 implementation of this method returns a string starting
/// with a fixed prefix of "sqlite3_" followed by the DB file name
/// removing any path name. For example, for the DB file
diff --git a/src/lib/datasrc/sqlite3_accessor_link.cc b/src/lib/datasrc/sqlite3_accessor_link.cc
index 81ac6b5..c064e0f 100644
--- a/src/lib/datasrc/sqlite3_accessor_link.cc
+++ b/src/lib/datasrc/sqlite3_accessor_link.cc
@@ -82,13 +82,15 @@ createInstance(isc::data::ConstElementPtr config, std::string& error) {
error = "Configuration error: " + errors->str();
return (NULL);
}
- std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+ const std::string dbfile =
+ config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
try {
boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
new SQLite3Accessor(dbfile, "IN")); // XXX: avoid hardcode RR class
return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
} catch (const std::exception& exc) {
- error = std::string("Error creating sqlite3 datasource: ") + exc.what();
+ error = std::string("Error creating sqlite3 datasource: ") +
+ exc.what();
return (NULL);
} catch (...) {
error = std::string("Error creating sqlite3 datasource, "
diff --git a/src/lib/datasrc/sqlite3_datasrc.cc b/src/lib/datasrc/sqlite3_datasrc.cc
index 7cd565d..b450cd5 100644
--- a/src/lib/datasrc/sqlite3_datasrc.cc
+++ b/src/lib/datasrc/sqlite3_datasrc.cc
@@ -14,19 +14,33 @@
#include <string>
#include <sstream>
+#include <utility>
#include <sqlite3.h>
#include <datasrc/sqlite3_datasrc.h>
#include <datasrc/logger.h>
-
+#include <exceptions/exceptions.h>
#include <dns/rrttl.h>
#include <dns/rdata.h>
#include <dns/rdataclass.h>
#include <dns/rrset.h>
#include <dns/rrsetlist.h>
-#define SQLITE_SCHEMA_VERSION 1
+namespace {
+// Expected schema. The major version must match exactly or there is an error.
+// If the minor version of the database is less than this, a warning is output.
+//
+// It is assumed that a program written to run on schema m.n of the database
+// will also run with a database version m.p, where p is any number. However,
+// if p < n, the schema has been extended since this database was created, so
+// some advantage may result from upgrading the database. Conversely, if p > n,
+// the database is at a later version than the program was written for and the
+// program may not be taking advantage of features (possibly performance
+// improvements) added to the database.
+const int SQLITE_SCHEMA_MAJOR_VERSION = 2;
+const int SQLITE_SCHEMA_MINOR_VERSION = 0;
+}
using namespace std;
using namespace isc::dns;
@@ -36,13 +50,14 @@ namespace isc {
namespace datasrc {
struct Sqlite3Parameters {
- Sqlite3Parameters() : db_(NULL), version_(-1),
+ Sqlite3Parameters() : db_(NULL), major_version_(-1), minor_version_(-1),
q_zone_(NULL), q_record_(NULL), q_addrs_(NULL), q_referral_(NULL),
q_any_(NULL), q_count_(NULL), q_previous_(NULL), q_nsec3_(NULL),
q_prevnsec3_(NULL)
{}
sqlite3* db_;
- int version_;
+ int major_version_;
+ int minor_version_;
sqlite3_stmt* q_zone_;
sqlite3_stmt* q_record_;
sqlite3_stmt* q_addrs_;
@@ -56,38 +71,41 @@ struct Sqlite3Parameters {
namespace {
const char* const SCHEMA_LIST[] = {
- "CREATE TABLE schema_version (version INTEGER NOT NULL)",
- "INSERT INTO schema_version VALUES (1)",
+ "CREATE TABLE schema_version (version INTEGER NOT NULL, "
+ "minor INTEGER NOT NULL DEFAULT 0)",
+ "INSERT INTO schema_version VALUES (2, 0)",
"CREATE TABLE zones (id INTEGER PRIMARY KEY, "
- "name STRING NOT NULL COLLATE NOCASE, "
- "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+ "name TEXT NOT NULL COLLATE NOCASE, "
+ "rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN', "
"dnssec BOOLEAN NOT NULL DEFAULT 0)",
"CREATE INDEX zones_byname ON zones (name)",
"CREATE TABLE records (id INTEGER PRIMARY KEY, "
- "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
- "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
- "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "zone_id INTEGER NOT NULL, name TEXT NOT NULL COLLATE NOCASE, "
+ "rname TEXT NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+ "rdtype TEXT NOT NULL COLLATE NOCASE, sigtype TEXT COLLATE NOCASE, "
+ "rdata TEXT NOT NULL)",
"CREATE INDEX records_byname ON records (name)",
"CREATE INDEX records_byrname ON records (rname)",
+ "CREATE INDEX records_bytype_and_rname ON records (rdtype, rname)",
"CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
- "hash STRING NOT NULL COLLATE NOCASE, "
- "owner STRING NOT NULL COLLATE NOCASE, "
- "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
- "rdata STRING NOT NULL)",
+ "hash TEXT NOT NULL COLLATE NOCASE, "
+ "owner TEXT NOT NULL COLLATE NOCASE, "
+ "ttl INTEGER NOT NULL, rdtype TEXT NOT NULL COLLATE NOCASE, "
+ "rdata TEXT NOT NULL)",
"CREATE INDEX nsec3_byhash ON nsec3 (hash)",
"CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
"zone_id INTEGER NOT NULL, "
"version INTEGER NOT NULL, "
"operation INTEGER NOT NULL, "
- "name STRING NOT NULL COLLATE NOCASE, "
- "rrtype STRING NOT NULL COLLATE NOCASE, "
+ "name TEXT NOT NULL COLLATE NOCASE, "
+ "rrtype TEXT NOT NULL COLLATE NOCASE, "
"ttl INTEGER NOT NULL, "
- "rdata STRING NOT NULL)",
+ "rdata TEXT NOT NULL)",
NULL
};
const char* const q_version_str = "SELECT version FROM schema_version";
+const char* const q_minor_str = "SELECT minor FROM schema_version";
const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1";
@@ -109,12 +127,16 @@ const char* const q_referral_str = "SELECT rdtype, ttl, sigtype, rdata FROM "
const char* const q_any_str = "SELECT rdtype, ttl, sigtype, rdata "
"FROM records WHERE zone_id=?1 AND name=?2";
+// Note: the wildcard symbol '%' is expected to be added to the text
+// for the placeholder for LIKE given via sqlite3_bind_text(). We don't
+// use the expression such as (?2 || '%') because it would disable the use
+// of indices and could result in terrible performance.
const char* const q_count_str = "SELECT COUNT(*) FROM records "
- "WHERE zone_id=?1 AND rname LIKE (?2 || '%');";
+ "WHERE zone_id=?1 AND rname LIKE ?2;";
const char* const q_previous_str = "SELECT name FROM records "
- "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
- "rname < $2 ORDER BY rname DESC LIMIT 1";
+ "WHERE rname < ?2 AND zone_id=?1 AND rdtype = 'NSEC' "
+ "ORDER BY rname DESC LIMIT 1";
const char* const q_nsec3_str = "SELECT rdtype, ttl, rdata FROM nsec3 "
"WHERE zone_id = ?1 AND hash = $2";
@@ -314,8 +336,9 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
" to SQL statement (qcount)");
}
- const string revname_text = name.reverse().toText();
- rc = sqlite3_bind_text(dbparameters->q_count_, 2, revname_text.c_str(),
+ const string revname_text = name.reverse().toText() + "%";
+ rc = sqlite3_bind_text(dbparameters->q_count_, 2,
+ revname_text.c_str(),
-1, SQLITE_STATIC);
if (rc != SQLITE_OK) {
isc_throw(Sqlite3Error, "Could not bind name " << name.reverse() <<
@@ -675,15 +698,15 @@ void do_sleep() {
nanosleep(&req, NULL);
}
-// returns the schema version if the schema version table exists
+// returns the schema version element if the schema version table exists
// returns -1 if it does not
-int check_schema_version(sqlite3* db) {
+int check_schema_version_element(sqlite3* db, const char* const version_query) {
sqlite3_stmt* prepared = NULL;
// At this point in time, the database might be exclusively locked, in
// which case even prepare() will return BUSY, so we may need to try a
// few times
for (size_t i = 0; i < 50; ++i) {
- int rc = sqlite3_prepare_v2(db, q_version_str, -1, &prepared, NULL);
+ int rc = sqlite3_prepare_v2(db, version_query, -1, &prepared, NULL);
if (rc == SQLITE_ERROR) {
// this is the error that is returned when the table does not
// exist
@@ -705,27 +728,73 @@ int check_schema_version(sqlite3* db) {
return (version);
}
+// Returns the schema major and minor version numbers in a pair.
+// Returns (-1, -1) if the table does not exist, (1, 0) for a V1
+// database, and (n, m) for any other.
+pair<int, int> check_schema_version(sqlite3* db) {
+ int major = check_schema_version_element(db, q_version_str);
+ if (major == -1) {
+ return (make_pair(-1, -1));
+ } else if (major == 1) {
+ return (make_pair(1, 0));
+ } else {
+ int minor = check_schema_version_element(db, q_minor_str);
+ return (make_pair(major, minor));
+ }
+}
+
+// A helper class used in create_database() below so we can manage the one-shot
+// transaction safely.
+class ScopedTransaction {
+public:
+ ScopedTransaction(sqlite3* db) : db_(NULL) {
+ // try for 5 secs (50*0.1)
+ for (size_t i = 0; i < 50; ++i) {
+ const int rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION",
+ NULL, NULL, NULL);
+ if (rc == SQLITE_OK) {
+ break;
+ } else if (rc != SQLITE_BUSY || i == 50) {
+ isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
+ "for database creation: " << sqlite3_errmsg(db));
+ }
+ do_sleep();
+ }
+ // Hold the DB pointer once we have successfully acquired the lock.
+ db_ = db;
+ }
+ ~ScopedTransaction() {
+ if (db_ != NULL) {
+ // Note: even rollback could fail in theory, but in that case
+ // we cannot do much for safe recovery anyway. We could at least
+ // log the event, but for now don't even bother to do that, with
+ // the expectation that we'll soon stop creating the schema in this
+ // module.
+ sqlite3_exec(db_, "ROLLBACK", NULL, NULL, NULL);
+ }
+ }
+ void commit() {
+ if (sqlite3_exec(db_, "COMMIT TRANSACTION", NULL, NULL, NULL) !=
+ SQLITE_OK) {
+ isc_throw(Sqlite3Error, "Unable to commit newly created database "
+ "schema: " << sqlite3_errmsg(db_));
+ }
+ db_ = NULL;
+ }
+
+private:
+ sqlite3* db_;
+};
+
// return db version
-int create_database(sqlite3* db) {
+pair<int, int> create_database(sqlite3* db) {
+ logger.info(DATASRC_SQLITE_SETUP);
+
// try to get an exclusive lock. Once that is obtained, do the version
// check *again*, just in case this process was racing another
- //
- // try for 5 secs (50*0.1)
- int rc;
- logger.info(DATASRC_SQLITE_SETUP);
- for (size_t i = 0; i < 50; ++i) {
- rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
- NULL);
- if (rc == SQLITE_OK) {
- break;
- } else if (rc != SQLITE_BUSY || i == 50) {
- isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
- "for database creation: " << sqlite3_errmsg(db));
- }
- do_sleep();
- }
- int schema_version = check_schema_version(db);
- if (schema_version == -1) {
+ ScopedTransaction transaction(db);
+ pair<int, int> schema_version = check_schema_version(db);
+ if (schema_version.first == -1) {
for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
SQLITE_OK) {
@@ -733,23 +802,40 @@ int create_database(sqlite3* db) {
"Failed to set up schema " << SCHEMA_LIST[i]);
}
}
- sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
- return (SQLITE_SCHEMA_VERSION);
- } else {
- return (schema_version);
+ transaction.commit();
+
+ // Return the version. We query again to ensure that the only point
+ // in which the current schema version is defined is in the
+ // CREATE statements.
+ schema_version = check_schema_version(db);
}
+ return (schema_version);
}
void
checkAndSetupSchema(Sqlite3Initializer* initializer) {
sqlite3* const db = initializer->params_.db_;
- int schema_version = check_schema_version(db);
- if (schema_version != SQLITE_SCHEMA_VERSION) {
+ // Note: we use the same SCHEMA_xxx_VERSION log IDs here and in
+ // sqlite3_accessor.cc, which is against our policy of ID uniqueness.
+ // The assumption is that this file will soon be deprecated, and we don't
+ // bother to define separate IDs for the short period.
+ pair<int, int> schema_version = check_schema_version(db);
+ if (schema_version.first == -1) {
schema_version = create_database(db);
- }
- initializer->params_.version_ = schema_version;
-
+ } else if (schema_version.first != SQLITE_SCHEMA_MAJOR_VERSION) {
+ LOG_ERROR(logger, DATASRC_SQLITE_INCOMPATIBLE_VERSION)
+ .arg(schema_version.first).arg(schema_version.second)
+ .arg(SQLITE_SCHEMA_MAJOR_VERSION).arg(SQLITE_SCHEMA_MINOR_VERSION);
+ isc_throw(IncompatibleDbVersion, "Incompatible database version");
+ } else if (schema_version.second < SQLITE_SCHEMA_MINOR_VERSION) {
+ LOG_WARN(logger, DATASRC_SQLITE_COMPATIBLE_VERSION)
+ .arg(schema_version.first).arg(schema_version.second)
+ .arg(SQLITE_SCHEMA_MAJOR_VERSION).arg(SQLITE_SCHEMA_MINOR_VERSION);
+ }
+
+ initializer->params_.major_version_ = schema_version.first;
+ initializer->params_.minor_version_ = schema_version.second;
initializer->params_.q_zone_ = prepare(db, q_zone_str);
initializer->params_.q_record_ = prepare(db, q_record_str);
initializer->params_.q_addrs_ = prepare(db, q_addrs_str);
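Illustrative note (not part of the patch): the version handling in checkAndSetupSchema() boils down to the following rule, shown here as a hypothetical helper:

    void checkVersionPair(const std::pair<int, int>& db_version) {
        if (db_version.first != SQLITE_SCHEMA_MAJOR_VERSION) {
            // hard failure: DATASRC_SQLITE_INCOMPATIBLE_VERSION is logged and
            // IncompatibleDbVersion is thrown
        } else if (db_version.second < SQLITE_SCHEMA_MINOR_VERSION) {
            // soft case: only the DATASRC_SQLITE_COMPATIBLE_VERSION warning
        }
        // otherwise the database is fully up to date
    }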
diff --git a/src/lib/datasrc/sqlite3_datasrc.h b/src/lib/datasrc/sqlite3_datasrc.h
index d4abef7..8ee042f 100644
--- a/src/lib/datasrc/sqlite3_datasrc.h
+++ b/src/lib/datasrc/sqlite3_datasrc.h
@@ -41,6 +41,12 @@ public:
isc::Exception(file, line, what) {}
};
+class IncompatibleDbVersion : public Exception {
+public:
+ IncompatibleDbVersion(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
class Sqlite3DataSrc : public DataSrc {
///
/// \name Constructors, Assignment Operator and Destructor.
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index 1ce3966..77d7a1d 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -83,6 +83,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
authors->addRdata(generic::TXT("Kazunori Fujiwara"));
authors->addRdata(generic::TXT("Michael Graff"));
authors->addRdata(generic::TXT("Michal Vaner"));
+ authors->addRdata(generic::TXT("Mukund Sivaraman"));
authors->addRdata(generic::TXT("Naoki Kambe"));
authors->addRdata(generic::TXT("Shane Kerr"));
authors->addRdata(generic::TXT("Shen Tingting"));
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index c8ffa58..90fb3e4 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -48,6 +48,7 @@ run_unittests_SOURCES += datasrc_unittest.cc
run_unittests_SOURCES += static_unittest.cc
run_unittests_SOURCES += query_unittest.cc
run_unittests_SOURCES += cache_unittest.cc
+run_unittests_SOURCES += test_client.h test_client.cc
run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
run_unittests_SOURCES += rbtree_unittest.cc
run_unittests_SOURCES += logger_unittest.cc
@@ -58,6 +59,7 @@ run_unittests_SOURCES += sqlite3_accessor_unittest.cc
run_unittests_SOURCES += memory_datasrc_unittest.cc
run_unittests_SOURCES += rbnode_rrset_unittest.cc
run_unittests_SOURCES += zone_finder_context_unittest.cc
+run_unittests_SOURCES += faked_nsec3.h faked_nsec3.cc
# We need the actual module implementation in the tests (they are not part
# of libdatasrc)
@@ -105,9 +107,10 @@ EXTRA_DIST += testdata/mkbrokendb.c
EXTRA_DIST += testdata/root.zone
EXTRA_DIST += testdata/rrset_toWire1
EXTRA_DIST += testdata/rrset_toWire2
-EXTRA_DIST += testdata/rwtest.sqlite3
EXTRA_DIST += testdata/sql1.example.com.signed
EXTRA_DIST += testdata/sql2.example.com.signed
EXTRA_DIST += testdata/test-root.sqlite3
EXTRA_DIST += testdata/test.sqlite3
-EXTRA_DIST += testdata/test.sqlite3.nodiffs
+EXTRA_DIST += testdata/new_minor_schema.sqlite3
+EXTRA_DIST += testdata/newschema.sqlite3
+EXTRA_DIST += testdata/oldschema.sqlite3
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 8ffd3c7..c18cfad 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include "faked_nsec3.h"
+
#include <stdlib.h>
#include <boost/shared_ptr.hpp>
@@ -24,6 +26,7 @@
#include <dns/name.h>
#include <dns/rrttl.h>
#include <dns/rrset.h>
+#include <dns/nsec3hash.h>
#include <exceptions/exceptions.h>
#include <datasrc/database.h>
@@ -44,6 +47,7 @@ using namespace std;
using boost::dynamic_pointer_cast;
using boost::lexical_cast;
using namespace isc::dns;
+using namespace isc::datasrc::test;
namespace {
@@ -167,7 +171,10 @@ const char* const TEST_RECORDS[][5] = {
"1234 3600 1800 2419200 7200" },
{"example.org.", "NS", "3600", "", "ns.example.com."},
{"example.org.", "A", "3600", "", "192.0.2.1"},
- {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+ // Note that the RDATA text is "normalized", i.e., identical to what
+ // Rdata::toText() would produce. Some tests rely on that behavior.
+ {"example.org.", "NSEC", "3600", "",
+ "acnamesig1.example.org. A NS RRSIG NSEC"},
{"example.org.", "RRSIG", "3600", "", "SOA 5 3 3600 20000101000000 "
"20000201000000 12345 example.org. FAKEFAKEFAKE"},
{"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
@@ -207,8 +214,14 @@ const char* const TEST_RECORDS[][5] = {
// FIXME: Taken from a different test. Fill with proper data when creating a test.
const char* TEST_NSEC3_RECORDS[][5] = {
- {"1BB7SO0452U1QHL98UISNDD9218GELR5", "NSEC3", "3600", "", "1 0 10 FEEDABEE 4KLSVDE8KH8G95VU68R7AHBE1CPQN38J"},
- {"1BB7SO0452U1QHL98UISNDD9218GELR5", "RRSIG", "3600", "", "NSEC3 5 4 7200 20100410172647 20100311172647 63192 example.org. gNIVj4T8t51fEU6kOPpvK7HOGBFZGbalN5ZK mInyrww6UWZsUNdw07ge6/U6HfG+/s61RZ/L is2M6yUWHyXbNbj/QqwqgadG5dhxTArfuR02 xP600x0fWX8LXzW4yLMdKVxGbzYT+vvGz71o 8gHSY5vYTtothcZQa4BMKhmGQEk="},
+ {apex_hash, "NSEC3", "300", "", "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"},
+ {apex_hash, "RRSIG", "300", "", "NSEC3 5 4 7200 20100410172647 20100311172647 63192 example.org. gNIVj4T8t51fEU6kOPpvK7HOGBFZGbalN5ZK mInyrww6UWZsUNdw07ge6/U6HfG+/s61RZ/L is2M6yUWHyXbNbj/QqwqgadG5dhxTArfuR02 xP600x0fWX8LXzW4yLMdKVxGbzYT+vvGz71o 8gHSY5vYTtothcZQa4BMKhmGQEk="},
+ {ns1_hash, "NSEC3", "300", "", "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"},
+ {ns1_hash, "RRSIG", "300", "", "NSEC3 5 4 7200 20100410172647 20100311172647 63192 example.org. gNIVj4T8t51fEU6kOPpvK7HOGBFZGbalN5ZK mInyrww6UWZsUNdw07ge6/U6HfG+/s61RZ/L is2M6yUWHyXbNbj/QqwqgadG5dhxTArfuR02 xP600x0fWX8LXzW4yLMdKVxGbzYT+vvGz71o 8gHSY5vYTtothcZQa4BMKhmGQEk="},
+ {w_hash, "NSEC3", "300", "", "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"},
+ {w_hash, "RRSIG", "300", "", "NSEC3 5 4 7200 20100410172647 20100311172647 63192 example.org. gNIVj4T8t51fEU6kOPpvK7HOGBFZGbalN5ZK mInyrww6UWZsUNdw07ge6/U6HfG+/s61RZ/L is2M6yUWHyXbNbj/QqwqgadG5dhxTArfuR02 xP600x0fWX8LXzW4yLMdKVxGbzYT+vvGz71o 8gHSY5vYTtothcZQa4BMKhmGQEk="},
+ {zzz_hash, "NSEC3", "300", "", "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"},
+ {zzz_hash, "RRSIG", "300", "", "NSEC3 5 4 7200 20100410172647 20100311172647 63192 example.org. gNIVj4T8t51fEU6kOPpvK7HOGBFZGbalN5ZK mInyrww6UWZsUNdw07ge6/U6HfG+/s61RZ/L is2M6yUWHyXbNbj/QqwqgadG5dhxTArfuR02 xP600x0fWX8LXzW4yLMdKVxGbzYT+vvGz71o 8gHSY5vYTtothcZQa4BMKhmGQEk="},
{NULL, NULL, NULL, NULL, NULL}
};
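Reading aid (not part of the patch): the NSEC3 RDATA in the records above follows the standard presentation order of hash algorithm, flags, iterations, salt, next hashed owner name, and type bitmap:

    // "1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG"
    //  |  |  |    |     |                                |
    //  |  |  |    |     |                                +-- types present at the name
    //  |  |  |    |     +-- next hashed owner name (base32hex)
    //  |  |  |    +-- salt
    //  |  |  +-- iterations
    //  |  +-- flags (1 = opt-out)
    //  +-- hash algorithm (1 = SHA-1)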
@@ -501,62 +514,46 @@ private:
}
// Return faked data for tests
- switch (step ++) {
- case 0:
- data[DatabaseAccessor::NAME_COLUMN] = "example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "A";
- data[DatabaseAccessor::TTL_COLUMN] = "3600";
- data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
- return (true);
- case 1:
- data[DatabaseAccessor::NAME_COLUMN] = "example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
- data[DatabaseAccessor::TTL_COLUMN] = "3600";
- data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
- "1234 3600 1800 2419200 7200";
- return (true);
- case 2:
- data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "A";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
- data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
- return (true);
- case 3:
- data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "A";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
- data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
- return (true);
- case 4:
- data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
- data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
- return (true);
- case 5:
- data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
- data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
- return (true);
- case 6:
- data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "A";
- data[DatabaseAccessor::TTL_COLUMN] = "300";
- data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
- return (true);
- case 7:
- data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
- data[DatabaseAccessor::TYPE_COLUMN] = "A";
- data[DatabaseAccessor::TTL_COLUMN] = "600";
- data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
- return (true);
- default:
- ADD_FAILURE() <<
- "Request past the end of iterator context";
- case 8:
- return (false);
+ // This is the sequence of zone data in the order of appearance
+ // in the returned sequence from this iterator.
+ typedef const char* ColumnText[4];
+ const ColumnText zone_data[] = {
+ // A couple of basic RRs at the zone origin.
+ {"example.org", "A", "3600", "192.0.2.1"},
+ {"example.org", "SOA", "3600", "ns1.example.org. "
+ "admin.example.org. 1234 3600 1800 2419200 7200"},
+ // RRsets sharing the same owner name with multiple RRs.
+ {"x.example.org", "A", "300", "192.0.2.1"},
+ {"x.example.org", "A", "300", "192.0.2.2"},
+ {"x.example.org", "AAAA", "300", "2001:db8::1"},
+ {"x.example.org", "AAAA", "300", "2001:db8::2"},
+ // RRSIGs. Covered types are different and these two should
+ // be distinguished.
+ {"x.example.org", "RRSIG", "300",
+ "A 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.org. FAKEFAKEFAKE"},
+ {"x.example.org", "RRSIG", "300",
+ "AAAA 5 3 3600 20000101000000 20000201000000 12345 "
+ "example.org. FAKEFAKEFAKEFAKE"},
+ // Mixture of different TTLs. Covering both cases of small
+ // then large and large then small. In either case the smaller
+ // TTL should win.
+ {"ttldiff.example.org", "A", "300", "192.0.2.1"},
+ {"ttldiff.example.org", "A", "600", "192.0.2.2"},
+ {"ttldiff2.example.org", "AAAA", "600", "2001:db8::1"},
+ {"ttldiff2.example.org", "AAAA", "300", "2001:db8::2"}};
+ const size_t num_rrs = sizeof(zone_data) / sizeof(zone_data[0]);
+ if (step > num_rrs) {
+ ADD_FAILURE() << "Request past the end of iterator context";
+ } else if (step < num_rrs) {
+ data[DatabaseAccessor::NAME_COLUMN] = zone_data[step][0];
+ data[DatabaseAccessor::TYPE_COLUMN] = zone_data[step][1];
+ data[DatabaseAccessor::TTL_COLUMN] = zone_data[step][2];
+ data[DatabaseAccessor::RDATA_COLUMN] = zone_data[step][3];
+ ++step;
+ return (true);
}
+ return (false);
}
};
class EmptyIteratorContext : public IteratorContext {
@@ -968,7 +965,7 @@ private:
i = cur_name_.begin(); i != cur_name_.end(); ++ i) {
i->push_back(hash);
}
- (*readonly_records_)[hash] = cur_name_;
+ nsec3_namespace_[hash] = cur_name_;
cur_name_.clear();
}
@@ -1006,6 +1003,31 @@ private:
}
addCurHash(prev_name);
}
+
+public:
+ // This adds the NSEC3PARAM into the apex, so we can perform some NSEC3
+ // tests. Note that the NSEC3 namespace is available in other tests, but
+ // it should not be accessed at that time.
+ void enableNSEC3() {
+ // We place the signature first, so it's in the block with the other
+ // signatures
+ vector<string> signature;
+ signature.push_back("RRSIG");
+ signature.push_back("3600");
+ signature.push_back("");
+ signature.push_back("NSEC3PARAM 5 3 3600 20000101000000 20000201000000 "
+ "12345 example.org. FAKEFAKEFAKE");
+ signature.push_back("exmaple.org.");
+ (*readonly_records_)["example.org."].push_back(signature);
+ // Now the NSEC3 param itself
+ vector<string> param;
+ param.push_back("NSEC3PARAM");
+ param.push_back("3600");
+ param.push_back("");
+ param.push_back("1 0 12 aabbccdd");
+ param.push_back("example.org.");
+ (*readonly_records_)["example.org."].push_back(param);
+ }
};
// This tests the default getRecords behaviour, throwing NotImplemented
@@ -1059,6 +1081,11 @@ public:
"FAKEFAKEFAKE"));
}
+ ~DatabaseClientTest() {
+ // Make sure we restore the default creator, whether or not we set it
+ setNSEC3HashCreator(NULL);
+ }
+
/*
* We initialize the client from a function, so we can call it multiple
* times per test.
@@ -1072,7 +1099,7 @@ public:
// probably move this to some specialized templated method specific
// to SQLite3 (or for even a longer term we should add an API to
// purge the diffs table).
- const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+ const char* const install_cmd = INSTALL_PROG " " TEST_DATA_COMMONDIR
"/rwtest.sqlite3 " TEST_DATA_BUILDDIR
"/rwtest.sqlite3.copied";
if (system(install_cmd) != 0) {
@@ -1219,6 +1246,9 @@ public:
const std::vector<std::string> empty_rdatas_; // for NXRRSET/NXDOMAIN
std::vector<std::string> expected_rdatas_;
std::vector<std::string> expected_sig_rdatas_;
+
+ // A creator for use in several NSEC3 related tests.
+ TestNSEC3HashCreator test_nsec3_hash_creator_;
};
class TestSQLite3Accessor : public SQLite3Accessor {
@@ -1349,7 +1379,7 @@ checkRRset(isc::dns::ConstRRsetPtr rrset,
isc::testutils::rrsetCheck(expected_rrset, rrset);
}
-// Iterate through a zone
+// Iterate through a zone, common case
TYPED_TEST(DatabaseClientTest, iterator) {
ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
ConstRRsetPtr rrset(it->getNextRRset());
@@ -1357,47 +1387,100 @@ TYPED_TEST(DatabaseClientTest, iterator) {
// The first name should be the zone origin.
EXPECT_EQ(this->zname_, rrset->getName());
+}
- // The rest of the checks work only for the mock accessor.
- if (!this->is_mock_) {
- return;
- }
-
- this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("192.0.2.1");
- checkRRset(rrset, Name("example.org"), this->qclass_, RRType::A(),
- this->rrttl_, this->expected_rdatas_);
-
- rrset = it->getNextRRset();
- this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("ns1.example.org. admin.example.org. "
- "1234 3600 1800 2419200 7200");
- checkRRset(rrset, Name("example.org"), this->qclass_, RRType::SOA(),
- this->rrttl_, this->expected_rdatas_);
-
- rrset = it->getNextRRset();
- this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("192.0.2.1");
- this->expected_rdatas_.push_back("192.0.2.2");
- checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::A(),
- RRTTL(300), this->expected_rdatas_);
-
- rrset = it->getNextRRset();
- this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("2001:db8::1");
- this->expected_rdatas_.push_back("2001:db8::2");
- checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::AAAA(),
- RRTTL(300), this->expected_rdatas_);
-
- rrset = it->getNextRRset();
- ASSERT_NE(ConstRRsetPtr(), rrset);
- this->expected_rdatas_.clear();
- this->expected_rdatas_.push_back("192.0.2.1");
- this->expected_rdatas_.push_back("192.0.2.2");
- checkRRset(rrset, Name("ttldiff.example.org"), this->qclass_, RRType::A(),
- RRTTL(300), this->expected_rdatas_);
+// Supplemental structure used in the couple of tests below. It represents
+// parameters of an expected RRset containing up to two RDATAs. If it contains
+// only one RDATA, rdata2 is NULL.
+struct ExpectedRRset {
+ const char* const name;
+ const RRType rrtype;
+ const RRTTL rrttl;
+ const char* const rdata1;
+ const char* const rdata2;
+};
- EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+// Common checker for the iterator tests below. It extracts RRsets from the
+// given iterator and compares them to the expected sequence.
+void
+checkIteratorSequence(ZoneIterator& it, ExpectedRRset expected_sequence[],
+ size_t num_rrsets)
+{
+ vector<string> expected_rdatas;
+ for (size_t i = 0; i < num_rrsets; ++i) {
+ const ConstRRsetPtr rrset = it.getNextRRset();
+ ASSERT_TRUE(rrset);
+
+ expected_rdatas.clear();
+ expected_rdatas.push_back(expected_sequence[i].rdata1);
+ if (expected_sequence[i].rdata2 != NULL) {
+ expected_rdatas.push_back(expected_sequence[i].rdata2);
+ }
+ checkRRset(rrset, Name(expected_sequence[i].name), RRClass::IN(),
+ expected_sequence[i].rrtype, expected_sequence[i].rrttl,
+ expected_rdatas);
+ }
+ EXPECT_FALSE(it.getNextRRset());
+}
+
+TEST_F(MockDatabaseClientTest, iterator) {
+ // This version of test creates an iterator that combines same types of
+ // RRs into single RRsets.
+ ExpectedRRset expected_sequence[] = {
+ {"example.org", RRType::A(), rrttl_, "192.0.2.1", NULL},
+ {"example.org", RRType::SOA(), rrttl_,
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200",
+ NULL},
+ {"x.example.org", RRType::A(), RRTTL(300), "192.0.2.1", "192.0.2.2"},
+ {"x.example.org", RRType::AAAA(), RRTTL(300),
+ "2001:db8::1", "2001:db8::2"},
+ {"x.example.org", RRType::RRSIG(), RRTTL(300),
+ "A 5 3 3600 20000101000000 20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE", NULL},
+ {"x.example.org", RRType::RRSIG(), RRTTL(300),
+ "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. "
+ "FAKEFAKEFAKEFAKE", NULL},
+ {"ttldiff.example.org", RRType::A(), RRTTL(300),
+ "192.0.2.1", "192.0.2.2"},
+ {"ttldiff2.example.org", RRType::AAAA(), RRTTL(300),
+ "2001:db8::1", "2001:db8::2"}
+ };
+ checkIteratorSequence(*client_->getIterator(Name("example.org")),
+ expected_sequence,
+ sizeof(expected_sequence) /
+ sizeof(expected_sequence[0]));
+}
+
+TEST_F(MockDatabaseClientTest, iteratorSeparateRRs) {
+ // This version of test creates an iterator that separates all RRs as
+ // individual RRsets. In particular, it preserves the TTLs of an RRset
+ // even if they are different.
+ ExpectedRRset expected_sequence[] = {
+ {"example.org", RRType::A(), rrttl_, "192.0.2.1", NULL},
+ {"example.org", RRType::SOA(), rrttl_,
+ "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200",
+ NULL},
+ {"x.example.org", RRType::A(), RRTTL(300), "192.0.2.1", NULL},
+ {"x.example.org", RRType::A(), RRTTL(300), "192.0.2.2", NULL},
+ {"x.example.org", RRType::AAAA(), RRTTL(300), "2001:db8::1", NULL},
+ {"x.example.org", RRType::AAAA(), RRTTL(300), "2001:db8::2", NULL},
+ {"x.example.org", RRType::RRSIG(), RRTTL(300),
+ "A 5 3 3600 20000101000000 20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE", NULL},
+ {"x.example.org", RRType::RRSIG(), RRTTL(300),
+ "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. "
+ "FAKEFAKEFAKEFAKE", NULL},
+ {"ttldiff.example.org", RRType::A(), RRTTL(300), "192.0.2.1", NULL},
+ {"ttldiff.example.org", RRType::A(), RRTTL(600), "192.0.2.2", NULL},
+ {"ttldiff2.example.org", RRType::AAAA(), RRTTL(600), "2001:db8::1",
+ NULL},
+ {"ttldiff2.example.org", RRType::AAAA(), RRTTL(300), "2001:db8::2",
+ NULL}
+ };
+ checkIteratorSequence(*client_->getIterator(Name("example.org"), true),
+ expected_sequence,
+ sizeof(expected_sequence) /
+ sizeof(expected_sequence[0]));
}
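Illustrative note (not part of the patch): the two tests above correspond to the two iteration modes a hypothetical caller can request; only getIterator() and getNextRRset() are taken from the real API:

    // Default mode: RRs with the same name and type are combined into one RRset.
    ZoneIteratorPtr it = client->getIterator(Name("example.org"));
    // Separate-RRs mode: every RR becomes its own single-RR RRset, keeping its TTL.
    ZoneIteratorPtr it_raw = client->getIterator(Name("example.org"), true);
    for (ConstRRsetPtr rrset = it->getNextRRset(); rrset; rrset = it->getNextRRset()) {
        // process rrset
    }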
// This has inconsistent TTL in the set (the rest, like nonsense in
@@ -1534,12 +1617,14 @@ doFindTest(ZoneFinder& finder,
isc::dns::RRType::RRSIG(), expected_ttl,
expected_sig_rdatas);
} else if (expected_sig_rdatas.empty()) {
- EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset->getRRsig());
+ EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset->getRRsig()) <<
+ "Unexpected RRSIG: " << result->rrset->getRRsig()->toText();
} else {
ADD_FAILURE() << "Missing RRSIG";
}
} else if (expected_rdatas.empty()) {
- EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset);
+ EXPECT_EQ(isc::dns::RRsetPtr(), result->rrset) <<
+ "Unexpected RRset: " << result->rrset->toText();
} else {
ADD_FAILURE() << "Missing result";
}
@@ -1553,7 +1638,9 @@ doFindAllTestResult(ZoneFinder& finder, const isc::dns::Name& name,
const isc::dns::Name& expected_name =
isc::dns::Name::ROOT_NAME(),
const ZoneFinder::FindOptions options =
- ZoneFinder::FIND_DEFAULT)
+ ZoneFinder::FIND_DEFAULT,
+ ZoneFinder::FindResultFlags expected_flags =
+ ZoneFinder::RESULT_DEFAULT)
{
SCOPED_TRACE("All test for " + name.toText());
std::vector<ConstRRsetPtr> target;
@@ -1561,6 +1648,15 @@ doFindAllTestResult(ZoneFinder& finder, const isc::dns::Name& name,
EXPECT_TRUE(target.empty());
EXPECT_EQ(expected_result, result->code);
EXPECT_EQ(expected_type, result->rrset->getType());
+ if (expected_flags != ZoneFinder::RESULT_DEFAULT){
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_WILDCARD) != 0,
+ result->isWildcard());
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC_SIGNED) != 0,
+ result->isNSECSigned());
+ EXPECT_EQ((expected_flags & ZoneFinder::RESULT_NSEC3_SIGNED) != 0,
+ result->isNSEC3Signed());
+
+ }
RdataIteratorPtr it(result->rrset->getRdataIterator());
std::vector<std::string> rdata;
while (!it->isLast()) {
@@ -1872,38 +1968,30 @@ TYPED_TEST(DatabaseClientTest, find) {
}
TYPED_TEST(DatabaseClientTest, findOutOfZone) {
- // If the query name is out-of-zone it should result in NXDOMAIN
+ // If the query name is out-of-zone it should result in an exception
boost::shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
vector<ConstRRsetPtr> target;
// Superdomain
- doFindTest(*finder, Name("org"), this->qtype_, this->qtype_,
- this->rrttl_, ZoneFinder::NXDOMAIN,
- this->empty_rdatas_, this->empty_rdatas_);
- EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->findAll(Name("org"), target)->code);
+ EXPECT_THROW(finder->find(Name("org"), this->qtype_), OutOfZone);
+ EXPECT_THROW(finder->findAll(Name("org"), target), OutOfZone);
+
// sharing a common ancestor
- doFindTest(*finder, Name("noexample.org"), this->qtype_, this->qtype_,
- this->rrttl_, ZoneFinder::NXDOMAIN,
- this->empty_rdatas_, this->empty_rdatas_);
- EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->findAll(Name("noexample.org"),
- target)->code);
+ EXPECT_THROW(finder->find(Name("noexample.org"), this->qtype_), OutOfZone);
+ EXPECT_THROW(finder->findAll(Name("noexample.org"), target), OutOfZone);
+
// totally unrelated domain, smaller number of labels
- doFindTest(*finder, Name("com"), this->qtype_, this->qtype_,
- this->rrttl_, ZoneFinder::NXDOMAIN,
- this->empty_rdatas_, this->empty_rdatas_);
- EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->findAll(Name("com"), target)->code);
+ EXPECT_THROW(finder->find(Name("com"), this->qtype_), OutOfZone);
+ EXPECT_THROW(finder->findAll(Name("com"), target), OutOfZone);
+
// totally unrelated domain, same number of labels
- doFindTest(*finder, Name("example.com"), this->qtype_, this->qtype_,
- this->rrttl_, ZoneFinder::NXDOMAIN,
- this->empty_rdatas_, this->empty_rdatas_);
- EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->findAll(Name("example.com"),
- target)->code);
+ EXPECT_THROW(finder->find(Name("example.com"), this->qtype_), OutOfZone);
+ EXPECT_THROW(finder->findAll(Name("example.com"), target), OutOfZone);
+
// totally unrelated domain, larger number of labels
- doFindTest(*finder, Name("more.example.com"), this->qtype_, this->qtype_,
- this->rrttl_, ZoneFinder::NXDOMAIN,
- this->empty_rdatas_, this->empty_rdatas_);
- EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->findAll(Name("more.example.com"),
- target)->code);
+ EXPECT_THROW(finder->find(Name("more.example.com"), this->qtype_),
+ OutOfZone);
+ EXPECT_THROW(finder->findAll(Name("more.example.com"), target), OutOfZone);
}
TYPED_TEST(DatabaseClientTest, findDelegation) {
@@ -2374,10 +2462,169 @@ TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
}
+// Subroutine for dnssecFlagCheck defined below. It performs some simple
+// checks regarding DNSSEC related result flags for findAll().
+void
+dnssecFlagCheckForAny(ZoneFinder& finder, const Name& name,
+ ZoneFinder::FindResultFlags sec_flag)
+{
+ std::vector<ConstRRsetPtr> target; // just for placeholder
+ ConstZoneFinderContextPtr all_result =
+ finder.findAll(name, target, ZoneFinder::FIND_DNSSEC);
+ EXPECT_EQ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0,
+ all_result->isNSECSigned());
+ EXPECT_EQ((sec_flag & ZoneFinder::RESULT_NSEC3_SIGNED) != 0,
+ all_result->isNSEC3Signed());
+}
+
+// Common tests about DNSSEC related result flags. Shared for the NSEC
+// and NSEC3 cases.
+void
+dnssecFlagCheck(ZoneFinder& finder, ZoneFinder::FindResultFlags sec_flag) {
+ std::vector<std::string> expected_rdatas;
+ std::vector<std::string> expected_sig_rdatas;
+
+ // Check NXDOMAIN case in NSEC signed zone, and RESULT_NSEC_SIGNED flag
+ // should be returned to the upper-layer caller.
+ if ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0) {
+ expected_rdatas.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ expected_sig_rdatas.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ }
+ doFindTest(finder, Name("www1.example.org"), RRType::A(), RRType::NSEC(),
+ RRTTL(3600), ZoneFinder::NXDOMAIN, expected_rdatas,
+ expected_sig_rdatas, sec_flag, Name("www.example.org."),
+ ZoneFinder::FIND_DNSSEC);
+ dnssecFlagCheckForAny(finder, Name("www1.example.org"), sec_flag);
+
+ // Check NXRRSET case in NSEC signed zone, and RESULT_NSEC_SIGNED flag
+ // should be returned.
+ // No "findAll" test case for this because NXRRSET shouldn't happen for it.
+ expected_rdatas.clear();
+ expected_sig_rdatas.clear();
+ if ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0) {
+ expected_rdatas.push_back("www2.example.org. A AAAA NSEC RRSIG");
+ expected_sig_rdatas.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ }
+ doFindTest(finder, Name("www.example.org."), RRType::TXT(), RRType::NSEC(),
+ RRTTL(3600), ZoneFinder::NXRRSET, expected_rdatas,
+ expected_sig_rdatas, sec_flag, Name::ROOT_NAME(),
+ ZoneFinder::FIND_DNSSEC);
+
+ // Empty name, should result in NXRRSET (in this test setup the NSEC
+ // doesn't have RRSIG).
+ expected_rdatas.clear();
+ expected_sig_rdatas.clear();
+ if ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0) {
+ expected_rdatas.push_back("empty.nonterminal.example.org. NSEC");
+ }
+ doFindTest(finder, Name("nonterminal.example.org."), RRType::A(),
+ RRType::NSEC(), RRTTL(3600), ZoneFinder::NXRRSET,
+ expected_rdatas, expected_sig_rdatas, sec_flag,
+ Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+ dnssecFlagCheckForAny(finder, Name("nonterminal.example.org"), sec_flag);
+
+ // Wildcard match
+ expected_rdatas.clear();
+ expected_sig_rdatas.clear();
+ expected_rdatas.push_back("192.0.2.5");
+ expected_sig_rdatas.push_back("A 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ doFindTest(finder, Name("b.a.wild.example.org"), RRType::A(),
+ RRType::A(), RRTTL(3600), ZoneFinder::SUCCESS, expected_rdatas,
+ expected_sig_rdatas, (ZoneFinder::RESULT_WILDCARD | sec_flag),
+ Name("b.a.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+ dnssecFlagCheckForAny(finder, Name("b.a.wild.example.org"), sec_flag);
+
+ // Wildcard + NXRRSET (no "findAll" test for this case)
+ expected_rdatas.clear();
+ expected_sig_rdatas.clear();
+ if ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0) {
+ expected_rdatas.push_back("cancel.here.wild.example.org. "
+ "A NSEC RRSIG");
+ expected_sig_rdatas.push_back("NSEC 5 3 3600 20000101000000 "
+ "20000201000000 12345 example.org. "
+ "FAKEFAKEFAKE");
+ }
+ doFindTest(finder, Name("b.a.wild.example.org"),
+ RRType::TXT(), RRType::NSEC(), RRTTL(3600), ZoneFinder::NXRRSET,
+ expected_rdatas, expected_sig_rdatas,
+ (ZoneFinder::RESULT_WILDCARD | sec_flag),
+ Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+
+ // Empty wildcard (this NSEC doesn't have RRSIG in our test data)
+ expected_rdatas.clear();
+ expected_sig_rdatas.clear();
+ if ((sec_flag & ZoneFinder::RESULT_NSEC_SIGNED) != 0) {
+ expected_rdatas.push_back("wild.*.foo.*.bar.example.org. NSEC");
+ }
+ doFindTest(finder, Name("foo.wild.bar.example.org"),
+ RRType::TXT(), RRType::NSEC(), RRTTL(3600), ZoneFinder::NXRRSET,
+ expected_rdatas, expected_sig_rdatas,
+ (ZoneFinder::RESULT_WILDCARD | sec_flag),
+ Name("bao.example.org"), ZoneFinder::FIND_DNSSEC);
+ dnssecFlagCheckForAny(finder, Name("foo.wild.bar.example.org"), sec_flag);
+}
+
+TYPED_TEST(DatabaseClientTest, dnssecResultFlags) {
+ // ZoneFinder::find() for negative cases and wildcard cases should check
+ // whether the zone is signed with NSEC or NSEC3.
+
+ // In the default test setup, the zone should be considered NSEC-signed
+ // (the apex node has an NSEC RR).
+ {
+ SCOPED_TRACE("NSEC only");
+ dnssecFlagCheck(*this->getFinder(), ZoneFinder::RESULT_NSEC_SIGNED);
+ }
+
+ // Then add an NSEC3PARAM RRset at the apex (it may look weird if the
+ // zone only has an NSEC3PARAM RRset (but no NSEC3s), but it is okay for the
+ // purpose of this test). The zone should now be considered NSEC3-signed.
+ // Note that the apex NSEC still exists; NSEC3 should override NSEC.
+ this->updater_ = this->client_->getUpdater(this->zname_, false);
+ this->rrset_.reset(new RRset(this->zname_, this->qclass_,
+ RRType::NSEC3PARAM(), this->rrttl_));
+ this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+ this->rrset_->getClass(),
+ "1 0 12 aabbccdd"));
+ this->updater_->addRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("NSEC and NSEC3");
+ dnssecFlagCheck(this->updater_->getFinder(),
+ ZoneFinder::RESULT_NSEC3_SIGNED);
+ }
+
+ // Next, delete the apex NSEC. Since NSEC3PARAM remains, the zone should
+ // still be considered NSEC3-signed.
+ RRsetPtr nsec_rrset(new RRset(this->zname_, this->qclass_, RRType::NSEC(),
+ this->rrttl_));
+ nsec_rrset->addRdata(rdata::createRdata(RRType::NSEC(), this->qclass_,
+ "acnamesig1.example.org. NS A "
+ "NSEC RRSIG"));
+ this->updater_->deleteRRset(*nsec_rrset);
+ {
+ SCOPED_TRACE("NSEC3 only");
+ dnssecFlagCheck(this->updater_->getFinder(),
+ ZoneFinder::RESULT_NSEC3_SIGNED);
+ }
+
+ // Finally, delete the NSEC3PARAM we just added above. The zone should
+ // then be considered unsigned.
+ this->updater_->deleteRRset(*this->rrset_);
+ {
+ SCOPED_TRACE("unsigned");
+ dnssecFlagCheck(this->updater_->getFinder(),
+ ZoneFinder::RESULT_DEFAULT);
+ }
+}
+
TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
// The domain doesn't exist, so we must get the right NSEC
boost::shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
-
this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
"20000201000000 12345 example.org. "
@@ -2404,14 +2651,13 @@ TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
if (!this->is_mock_) {
return; // We don't make the real DB to throw
}
- EXPECT_NO_THROW(doFindTest(*finder,
- isc::dns::Name("notimplnsec.example.org."),
- isc::dns::RRType::TXT(),
- isc::dns::RRType::NSEC(), this->rrttl_,
- ZoneFinder::NXDOMAIN, this->empty_rdatas_,
- this->empty_rdatas_,
- ZoneFinder::RESULT_DEFAULT,
- Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+ // In this case the accessor doesn't support findPreviousName(), but the
+ // zone apex has NSEC, and the zone itself is considered NSEC-signed.
+ doFindTest(*finder, Name("notimplnsec.example.org."),
+ RRType::TXT(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+ this->empty_rdatas_, ZoneFinder::RESULT_NSEC_SIGNED,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
}
TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
@@ -2431,14 +2677,12 @@ TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
if (!this->is_mock_) {
return; // We don't make the real DB to throw
}
- EXPECT_NO_THROW(doFindTest(*finder,
- isc::dns::Name("here.wild.example.org."),
- isc::dns::RRType::TXT(),
- isc::dns::RRType::NSEC(),
- this->rrttl_, ZoneFinder::NXRRSET,
- this->empty_rdatas_, this->empty_rdatas_,
- ZoneFinder::RESULT_DEFAULT,
- Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+ // See the corresponding case of NXDOMAIN_NSEC.
+ doFindTest(*finder, Name("here.wild.example.org."),
+ RRType::TXT(), RRType::NSEC(), this->rrttl_,
+ ZoneFinder::NXRRSET, this->empty_rdatas_,
+ this->empty_rdatas_, ZoneFinder::RESULT_NSEC_SIGNED,
+ Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
}
TYPED_TEST(DatabaseClientTest, anyFromFind) {
@@ -2831,14 +3075,13 @@ TYPED_TEST(DatabaseClientTest, addDeviantRR) {
this->expected_rdatas_.clear();
this->expected_rdatas_.push_back("192.0.2.100");
{
- // Note: find() rejects out-of-zone query name with NXDOMAIN
+ // Note: find() rejects out-of-zone query name with an exception
// regardless of whether adding the RR succeeded, so this check
// actually doesn't confirm it.
SCOPED_TRACE("add out-of-zone RR");
- doFindTest(this->updater_->getFinder(), Name("example.com"),
- this->qtype_, this->qtype_, this->rrttl_,
- ZoneFinder::NXDOMAIN, this->empty_rdatas_,
- this->empty_rdatas_);
+ EXPECT_THROW(this->updater_->getFinder().find(Name("example.com"),
+ this->qtype_),
+ OutOfZone);
}
}
@@ -3542,4 +3785,25 @@ TEST_F(MockDatabaseClientTest, journalWithBadData) {
second->getNextDiff(), DataSourceError);
}
+/// Let us test a little bit of NSEC3.
+TEST_F(MockDatabaseClientTest, findNSEC3) {
+ // Set up the faked hash calculator.
+ setNSEC3HashCreator(&test_nsec3_hash_creator_);
+
+ DataSourceClient::FindResult
+ zone(this->client_->findZone(Name("example.org")));
+ ASSERT_EQ(result::SUCCESS, zone.code);
+ boost::shared_ptr<DatabaseClient::Finder> finder(
+ dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+
+ // It'll complain if there is no NSEC3PARAM yet
+ EXPECT_THROW(finder->findNSEC3(Name("example.org"), false),
+ DataSourceError);
+ // And enable NSEC3 in the zone.
+ this->current_accessor_->enableNSEC3();
+
+ // The rest is in the function, it is shared with in-memory tests
+ performNSEC3Test(*finder);
+}
+
}
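Illustrative note (not part of the patch): the NSEC3 test above relies on swapping in the fake hash calculator for the duration of the test, roughly as follows (finder is a placeholder for whatever ZoneFinder the test obtains):

    TestNSEC3HashCreator fake_creator;
    setNSEC3HashCreator(&fake_creator);   // findNSEC3() now sees the precomputed hashes
    // ... exercise finder->findNSEC3(...), e.g. via performNSEC3Test(*finder) ...
    setNSEC3HashCreator(NULL);            // restore the default creator; the test
                                          // fixture destructor also does this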
diff --git a/src/lib/datasrc/tests/faked_nsec3.cc b/src/lib/datasrc/tests/faked_nsec3.cc
new file mode 100644
index 0000000..4ca22a5
--- /dev/null
+++ b/src/lib/datasrc/tests/faked_nsec3.cc
@@ -0,0 +1,196 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "faked_nsec3.h"
+
+#include <dns/name.h>
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+#include <gtest/gtest.h>
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::testutils;
+
+namespace isc {
+namespace datasrc {
+namespace test {
+
+class TestNSEC3HashCreator::TestNSEC3Hash : public NSEC3Hash {
+private:
+ typedef map<Name, string> NSEC3HashMap;
+ typedef NSEC3HashMap::value_type NSEC3HashPair;
+ NSEC3HashMap map_;
+public:
+ TestNSEC3Hash() {
+ // Build pre-defined hash
+ map_[Name("example.org")] = apex_hash;
+ map_[Name("www.example.org")] = "2S9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
+ map_[Name("xxx.example.org")] = "Q09MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
+ map_[Name("yyy.example.org")] = "0A9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
+ map_[Name("x.y.w.example.org")] =
+ "2VPTU5TIMAMQTTGL4LUU9KG21E0AOR3S";
+ map_[Name("y.w.example.org")] = "K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H";
+ map_[Name("w.example.org")] = w_hash;
+ map_[Name("zzz.example.org")] = zzz_hash;
+ map_[Name("smallest.example.org")] =
+ "00000000000000000000000000000000";
+ map_[Name("largest.example.org")] =
+ "UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU";
+ }
+ virtual string calculate(const Name& name) const {
+ const NSEC3HashMap::const_iterator found = map_.find(name);
+ if (found != map_.end()) {
+ return (found->second);
+ }
+ isc_throw(isc::Unexpected, "unexpected name for NSEC3 test: "
+ << name);
+ }
+ virtual bool match(const rdata::generic::NSEC3PARAM&) const {
+ return (true);
+ }
+ virtual bool match(const rdata::generic::NSEC3&) const {
+ return (true);
+ }
+};
+
+NSEC3Hash* TestNSEC3HashCreator::create(const rdata::generic::NSEC3PARAM&)
+ const
+{
+ return (new TestNSEC3Hash);
+}
+
+NSEC3Hash* TestNSEC3HashCreator::create(const rdata::generic::NSEC3&) const {
+ return (new TestNSEC3Hash);
+}
+
+void
+findNSEC3Check(bool expected_matched, uint8_t expected_labels,
+ const string& expected_closest,
+ const string& expected_next,
+ const ZoneFinder::FindNSEC3Result& result,
+ bool expected_sig)
+{
+ EXPECT_EQ(expected_matched, result.matched);
+ // Convert to int so the error messages would be more readable:
+ EXPECT_EQ(static_cast<int>(expected_labels),
+ static_cast<int>(result.closest_labels));
+
+ vector<ConstRRsetPtr> actual_rrsets;
+ ASSERT_TRUE(result.closest_proof);
+ actual_rrsets.push_back(result.closest_proof);
+ if (expected_sig) {
+ actual_rrsets.push_back(result.closest_proof->getRRsig());
+ }
+ rrsetsCheck(expected_closest, actual_rrsets.begin(),
+ actual_rrsets.end());
+
+ actual_rrsets.clear();
+ if (expected_next.empty()) {
+ EXPECT_FALSE(result.next_proof);
+ } else {
+ ASSERT_TRUE(result.next_proof);
+ actual_rrsets.push_back(result.next_proof);
+ if (expected_sig) {
+ actual_rrsets.push_back(result.next_proof->getRRsig());
+ }
+ rrsetsCheck(expected_next, actual_rrsets.begin(),
+ actual_rrsets.end());
+ }
+}
+
+void
+performNSEC3Test(ZoneFinder &finder) {
+ // Parameter validation: the query name must be in or below the zone
+ EXPECT_THROW(finder.findNSEC3(Name("example.com"), false), OutOfZone);
+ EXPECT_THROW(finder.findNSEC3(Name("org"), true), OutOfZone);
+
+ Name origin("example.org");
+ const string apex_nsec3_text = string(apex_hash) + ".example.org." +
+ string(nsec3_common);
+ const string ns1_nsec3_text = string(ns1_hash) + ".example.org." +
+ string(nsec3_common);
+ const string w_nsec3_text = string(w_hash) + ".example.org." +
+ string(nsec3_common);
+ const string zzz_nsec3_text = string(zzz_hash) + ".example.org." +
+ string(nsec3_common);
+
+ // Apex name. It should have a matching NSEC3.
+ {
+ SCOPED_TRACE("apex, non recursive mode");
+ findNSEC3Check(true, origin.getLabelCount(), apex_nsec3_text, "",
+ finder.findNSEC3(origin, false));
+ }
+
+ // Recursive mode doesn't change the result in this case.
+ {
+ SCOPED_TRACE("apex, recursive mode");
+ findNSEC3Check(true, origin.getLabelCount(), apex_nsec3_text, "",
+ finder.findNSEC3(origin, true));
+ }
+
+ // Non existent name (in the NSEC3 namespace -- the findNSEC3 does
+ // not look into the normal data). With recursion disabled, a covering
+ // NSEC3 should be returned.
+ const Name www_name("www.example.org");
+ {
+ SCOPED_TRACE("non existent name, non recursive mode");
+ findNSEC3Check(false, www_name.getLabelCount(), apex_nsec3_text, "",
+ finder.findNSEC3(www_name, false));
+ }
+
+ // Non existent name. The closest provable encloser is the apex,
+ // and next closer is the query name itself (which NSEC3 for ns1
+ // covers)
+ // H(ns1) = 2T... < H(xxx) = Q0... < H(zzz) = R5...
+ {
+ SCOPED_TRACE("non existent name, recursive mode");
+ findNSEC3Check(true, origin.getLabelCount(), apex_nsec3_text,
+ ns1_nsec3_text,
+ finder.findNSEC3(Name("xxx.example.org"), true));
+ }
+
+ // Similar to the previous case, but next closer name is different
+ // from the query name. The closest encloser is w.example.org, and
+ // next closer is y.w.example.org.
+ // H(ns1) = 2T.. < H(y.w) = K8.. < H(zzz) = R5
+ {
+ SCOPED_TRACE("non existent name, non qname next closer");
+ findNSEC3Check(true, Name("w.example.org").getLabelCount(),
+ w_nsec3_text, ns1_nsec3_text,
+ finder.findNSEC3(Name("x.y.w.example.org"),
+ true));
+ }
+
+ // In the rest of test we check hash comparison for wrap around cases.
+ {
+ SCOPED_TRACE("very small hash");
+ const Name smallest_name("smallest.example.org");
+ findNSEC3Check(false, smallest_name.getLabelCount(),
+ zzz_nsec3_text, "",
+ finder.findNSEC3(smallest_name, false));
+ }
+ {
+ SCOPED_TRACE("very large hash");
+ const Name largest_name("largest.example.org");
+ findNSEC3Check(false, largest_name.getLabelCount(),
+ zzz_nsec3_text, "",
+ finder.findNSEC3(largest_name, false));
+ }
+}
+
+}
+}
+}
diff --git a/src/lib/datasrc/tests/faked_nsec3.h b/src/lib/datasrc/tests/faked_nsec3.h
new file mode 100644
index 0000000..51b4059
--- /dev/null
+++ b/src/lib/datasrc/tests/faked_nsec3.h
@@ -0,0 +1,86 @@
+// Copyright (C) 2011 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef FAKED_NSEC3_H
+#define FAKED_NSEC3_H
+
+#include <datasrc/zone.h>
+
+#include <dns/nsec3hash.h>
+
+#include <stdint.h>
+#include <string>
+
+namespace isc {
+namespace datasrc {
+namespace test {
+
+//
+// (Faked) NSEC3 hash data. Arbitrarily borrowed from RFC 5155 examples.
+//
+// Commonly used NSEC3 suffix. It's incorrect to use it for all NSEC3s, but
+// doesn't matter for the purpose of our tests.
+const char* const nsec3_common = " 300 IN NSEC3 1 1 12 aabbccdd "
+ "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG";
+// Likewise, common RRSIG suffix for NSEC3s.
+const char* const nsec3_rrsig_common = " 300 IN RRSIG NSEC3 5 3 3600 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE";
+
+// Some faked NSEC3 hash values commonly used in tests and the faked NSEC3Hash
+// object.
+//
+// For apex (example.org)
+const char* const apex_hash = "0P9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
+const char* const apex_hash_lower = "0p9mhaveqvm6t7vbl5lop2u3t2rp3tom";
+// For ns1.example.org
+const char* const ns1_hash = "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR";
+// For w.example.org
+const char* const w_hash = "01UDEMVP1J2F7EG6JEBPS17VP3N8I58H";
+// For x.y.w.example.org (lower-cased)
+const char* const xyw_hash = "2vptu5timamqttgl4luu9kg21e0aor3s";
+// For zzz.example.org.
+const char* const zzz_hash = "R53BQ7CC2UVMUBFU5OCMM6PERS9TK9EN";
+
+// A simple faked NSEC3 hash calculator with a dedicated creator for it.
+//
+// This is used in some NSEC3-related tests (shared between the in-memory
+// and database tests).
+class TestNSEC3HashCreator : public isc::dns::NSEC3HashCreator {
+private:
+ class TestNSEC3Hash;
+public:
+ virtual isc::dns::NSEC3Hash* create(const
+ isc::dns::rdata::generic::NSEC3PARAM&)
+ const;
+ virtual isc::dns::NSEC3Hash* create(const isc::dns::rdata::generic::NSEC3&)
+ const;
+};
+
+// Check the result against expected values. It directly calls EXPECT_ macros
+void
+findNSEC3Check(bool expected_matched, uint8_t expected_labels,
+ const std::string& expected_closest,
+ const std::string& expected_next,
+ const isc::datasrc::ZoneFinder::FindNSEC3Result& result,
+ bool expected_sig = false);
+
+// Perform the shared part of NSEC3 test (shared between in-memory and database
+// tests).
+void
+performNSEC3Test(ZoneFinder &finder);
+
+}
+}
+}
+
+#endif
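
For reference, a minimal sketch of how these helpers are meant to be combined (illustration only, not part of this commit; it assumes the setNSEC3HashCreator() call from <dns/nsec3hash.h> that the in-memory test later in this patch also uses):

    #include "faked_nsec3.h"

    #include <datasrc/zone.h>
    #include <dns/nsec3hash.h>

    // Hypothetical helper, not part of this commit.
    void
    exampleNSEC3TestBody(isc::datasrc::ZoneFinder& finder) {
        // Install the faked hash creator so that findNSEC3() sees the
        // predefined hash values from faked_nsec3.h. The finder is assumed
        // to already contain the NSEC3 RRsets built from apex_hash,
        // ns1_hash, w_hash and zzz_hash.
        isc::datasrc::test::TestNSEC3HashCreator creator;
        isc::dns::setNSEC3HashCreator(&creator);

        // Run the shared checks against any ZoneFinder implementation,
        // in-memory or database backed.
        isc::datasrc::test::performNSEC3Test(finder);
    }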
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 9096a9e..a7d13d5 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -12,11 +12,7 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
-#include <sstream>
-#include <vector>
-
-#include <boost/bind.hpp>
-#include <boost/foreach.hpp>
+#include "faked_nsec3.h"
#include <exceptions/exceptions.h>
@@ -30,19 +26,31 @@
#include <dns/rrttl.h>
#include <dns/masterload.h>
+#include <datasrc/client.h>
#include <datasrc/memory_datasrc.h>
#include <datasrc/data_source.h>
#include <datasrc/iterator.h>
+#include "test_client.h"
+
#include <testutils/dnsmessage_test.h>
#include <gtest/gtest.h>
+#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <sstream>
+#include <vector>
+
using namespace std;
using namespace isc::dns;
using namespace isc::dns::rdata;
using namespace isc::datasrc;
using namespace isc::testutils;
+using boost::shared_ptr;
+using namespace isc::datasrc::test;
namespace {
// Commonly used result codes (Who should write the prefix all the time)
@@ -285,83 +293,18 @@ setRRset(RRsetPtr rrset, vector<RRsetPtr*>::iterator& it) {
++it;
}
-ConstRRsetPtr
-textToRRset(const string& text_rrset, const RRClass& rrclass = RRClass::IN()) {
+RRsetPtr
+textToRRset(const string& text_rrset, const RRClass& rrclass = RRClass::IN(),
+ const Name& origin = Name::ROOT_NAME())
+{
stringstream ss(text_rrset);
RRsetPtr rrset;
vector<RRsetPtr*> rrsets;
rrsets.push_back(&rrset);
- masterLoad(ss, Name::ROOT_NAME(), rrclass,
- boost::bind(setRRset, _1, rrsets.begin()));
+ masterLoad(ss, origin, rrclass, boost::bind(setRRset, _1, rrsets.begin()));
return (rrset);
}
-// Some faked NSEC3 hash values commonly used in tests and the faked NSEC3Hash
-// object.
-//
-// For apex (example.org)
-const char* const apex_hash = "0P9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
-const char* const apex_hash_lower = "0p9mhaveqvm6t7vbl5lop2u3t2rp3tom";
-// For ns1.example.org
-const char* const ns1_hash = "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR";
-// For w.example.org
-const char* const w_hash = "01UDEMVP1J2F7EG6JEBPS17VP3N8I58H";
-// For x.y.w.example.org (lower-cased)
-const char* const xyw_hash = "2vptu5timamqttgl4luu9kg21e0aor3s";
-// For zzz.example.org.
-const char* const zzz_hash = "R53BQ7CC2UVMUBFU5OCMM6PERS9TK9EN";
-
-// A simple faked NSEC3 hash calculator with a dedicated creator for it.
-//
-// This is used in some NSEC3-related tests below.
-class TestNSEC3HashCreator : public NSEC3HashCreator {
- class TestNSEC3Hash : public NSEC3Hash {
- private:
- typedef map<Name, string> NSEC3HashMap;
- typedef NSEC3HashMap::value_type NSEC3HashPair;
- NSEC3HashMap map_;
- public:
- TestNSEC3Hash() {
- // Build pre-defined hash
- map_[Name("example.org")] = apex_hash;
- map_[Name("www.example.org")] = "2S9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
- map_[Name("xxx.example.org")] = "Q09MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
- map_[Name("yyy.example.org")] = "0A9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM";
- map_[Name("x.y.w.example.org")] =
- "2VPTU5TIMAMQTTGL4LUU9KG21E0AOR3S";
- map_[Name("y.w.example.org")] = "K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H";
- map_[Name("w.example.org")] = w_hash;
- map_[Name("zzz.example.org")] = zzz_hash;
- map_[Name("smallest.example.org")] =
- "00000000000000000000000000000000";
- map_[Name("largest.example.org")] =
- "UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU";
- }
- virtual string calculate(const Name& name) const {
- const NSEC3HashMap::const_iterator found = map_.find(name);
- if (found != map_.end()) {
- return (found->second);
- }
- isc_throw(isc::Unexpected, "unexpected name for NSEC3 test: "
- << name);
- }
- virtual bool match(const generic::NSEC3PARAM&) const {
- return (true);
- }
- virtual bool match(const generic::NSEC3&) const {
- return (true);
- }
- };
-
-public:
- virtual NSEC3Hash* create(const generic::NSEC3PARAM&) const {
- return (new TestNSEC3Hash);
- }
- virtual NSEC3Hash* create(const generic::NSEC3&) const {
- return (new TestNSEC3Hash);
- }
-};
-
/// \brief Test fixture for the InMemoryZoneFinder class
class InMemoryZoneFinderTest : public ::testing::Test {
// A straightforward pair of textual RR(set) and a RRsetPtr variable
@@ -398,6 +341,8 @@ public:
// Build test RRsets. Below, we construct an RRset for
// each textual RR(s) of zone_data, and assign it to the corresponding
// rr_xxx.
+ // Note that this contains an out-of-zone RR, and due to the
+ // validation check of masterLoad() used below, we cannot add SOA.
const RRsetData zone_data[] = {
{"example.org. 300 IN NS ns.example.org.", &rr_ns_},
{"example.org. 300 IN A 192.0.2.1", &rr_a_},
@@ -545,6 +490,8 @@ public:
ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT,
bool check_wild_answer = false)
{
+ SCOPED_TRACE("findTest for " + name.toText() + "/" + rrtype.toText());
+
if (zone_finder == NULL) {
zone_finder = &zone_finder_;
}
@@ -672,7 +619,7 @@ TEST_F(InMemoryZoneFinderTest, constructor) {
*/
TEST_F(InMemoryZoneFinderTest, add) {
// This one does not belong to this zone
- EXPECT_THROW(zone_finder_.add(rr_out_), InMemoryZoneFinder::OutOfZone);
+ EXPECT_THROW(zone_finder_.add(rr_out_), OutOfZone);
// Test null pointer
EXPECT_THROW(zone_finder_.add(ConstRRsetPtr()),
InMemoryZoneFinder::NullRRset);
@@ -899,8 +846,9 @@ TEST_F(InMemoryZoneFinderTest, findAny) {
findAllTest(origin_, ZoneFinder::SUCCESS, expected_sets);
// out zone name
- findAllTest(Name("example.com"), ZoneFinder::NXDOMAIN,
- vector<ConstRRsetPtr>());
+ EXPECT_THROW(findAllTest(Name("example.com"), ZoneFinder::NXDOMAIN,
+ vector<ConstRRsetPtr>()),
+ OutOfZone);
expected_sets.clear();
expected_sets.push_back(rr_child_glue_);
@@ -997,8 +945,8 @@ InMemoryZoneFinderTest::findCheck(ZoneFinder::FindResultFlags expected_flags) {
// These domains don't exist (and one is out of the zone)
findTest(Name("nothere.example.org"), RRType::A(), ZoneFinder::NXDOMAIN,
true, ConstRRsetPtr(), expected_flags);
- findTest(Name("example.net"), RRType::A(), ZoneFinder::NXDOMAIN, true,
- ConstRRsetPtr(), expected_flags);
+ EXPECT_THROW(zone_finder_.find(Name("example.net"), RRType::A()),
+ OutOfZone);
}
TEST_F(InMemoryZoneFinderTest, find) {
@@ -1053,8 +1001,7 @@ InMemoryZoneFinderTest::emptyNodeCheck(
// Note: basically we don't expect such a query to be performed (the common
// operation is to identify the best matching zone first then perform
// search it), but we shouldn't be confused even in the unexpected case.
- findTest(Name("org"), RRType::A(), ZoneFinder::NXDOMAIN, true,
- ConstRRsetPtr(), expected_flags);
+ EXPECT_THROW(zone_finder_.find(Name("org"), RRType::A()), OutOfZone);
}
TEST_F(InMemoryZoneFinderTest, emptyNode) {
@@ -1093,7 +1040,70 @@ TEST_F(InMemoryZoneFinderTest, load) {
// Try loading zone that is wrong in a different way
EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
- MasterLoadError);
+ MasterLoadError);
+}
+
+TEST_F(InMemoryZoneFinderTest, loadFromIterator) {
+ // The initial test set doesn't have SOA at the apex.
+ findTest(origin_, RRType::SOA(), ZoneFinder::NXRRSET, false,
+ ConstRRsetPtr());
+
+    // The content of the new version of the zone, to be installed first in
+    // the SQLite3 data source and then loaded via SQLite3 into memory. The data are
+ // chosen to cover major check points of the implementation:
+ // - the previously non-existent record is added (SOA)
+ // - An RRSIG is given from the iterator before the RRset it covers
+ // (RRSIG for SOA, because they are sorted by name then rrtype as text)
+ // - An RRset containing multiple RRs (ns1/A)
+ // - RRSIGs for different owner names
+ stringstream ss;
+ const char* const soa_txt = "example.org. 300 IN SOA . . 0 0 0 0 0\n";
+ const char* const soa_sig_txt = "example.org. 300 IN RRSIG SOA 5 3 300 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKE\n";
+ const char* const a_txt =
+ "ns1.example.org. 300 IN A 192.0.2.1\n"
+ "ns1.example.org. 300 IN A 192.0.2.2\n";
+ const char* const a_sig_txt = "ns1.example.org. 300 IN RRSIG A 5 3 300 "
+ "20000101000000 20000201000000 12345 example.org. FAKEFAKE\n";
+ ss << soa_txt << soa_sig_txt << a_txt << a_sig_txt;
+ shared_ptr<DataSourceClient> db_client = unittest::createSQLite3Client(
+ class_, origin_, TEST_DATA_BUILDDIR "/contexttest.sqlite3.copied", ss);
+ zone_finder_.load(*db_client->getIterator(origin_));
+
+ // The new content should be visible, including the previously-nonexistent
+ // SOA.
+ RRsetPtr expected_answer = textToRRset(soa_txt, RRClass::IN(), origin_);
+ expected_answer->addRRsig(textToRRset(soa_sig_txt));
+ findTest(origin_, RRType::SOA(), ZoneFinder::SUCCESS, true,
+ expected_answer);
+
+ expected_answer = textToRRset(a_txt);
+ expected_answer->addRRsig(textToRRset(a_sig_txt));
+ findTest(Name("ns1.example.org"), RRType::A(), ZoneFinder::SUCCESS, true,
+ expected_answer);
+
+ // File name should be (re)set to empty.
+ EXPECT_TRUE(zone_finder_.getFileName().empty());
+
+ // Loading the zone with an iterator separating RRs of the same RRset
+ // will fail because the resulting sequence doesn't meet assumptions of
+ // the (current) in-memory implementation.
+ EXPECT_THROW(zone_finder_.load(*db_client->getIterator(origin_, true)),
+ MasterLoadError);
+
+ // Load the zone from a file that contains more realistic data (borrowed
+ // from a different test). There's nothing special in this case for the
+ // purpose of this test, so it should just succeed.
+ db_client = unittest::createSQLite3Client(
+ class_, origin_, TEST_DATA_BUILDDIR "/contexttest.sqlite3.copied",
+ TEST_DATA_DIR "/contexttest.zone");
+ zone_finder_.load(*db_client->getIterator(origin_));
+
+    // Just check a couple of RRs in the new version of the zone.
+ findTest(Name("mx1.example.org"), RRType::A(), ZoneFinder::SUCCESS, true,
+ textToRRset("mx1.example.org. 3600 IN A 192.0.2.10"));
+ findTest(Name("ns1.example.org"), RRType::AAAA(), ZoneFinder::SUCCESS,
+ true, textToRRset("ns1.example.org. 3600 IN AAAA 2001:db8::1"));
}
/*
@@ -1512,14 +1522,12 @@ TEST_F(InMemoryZoneFinderTest, swap) {
EXPECT_EQ(RRClass::CH(), finder1.getClass());
EXPECT_EQ(RRClass::IN(), finder2.getClass());
// make sure the zone data is swapped, too
- findTest(origin_, RRType::NS(), ZoneFinder::NXDOMAIN, false,
- ConstRRsetPtr(), ZoneFinder::RESULT_DEFAULT, &finder1);
+ EXPECT_THROW(finder1.find(origin_, RRType::NS()), OutOfZone);
findTest(other_origin, RRType::TXT(), ZoneFinder::SUCCESS, false,
ConstRRsetPtr(), ZoneFinder::RESULT_DEFAULT, &finder1);
findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, false,
ConstRRsetPtr(), ZoneFinder::RESULT_DEFAULT, &finder2);
- findTest(other_origin, RRType::TXT(), ZoneFinder::NXDOMAIN, false,
- ConstRRsetPtr(), ZoneFinder::RESULT_DEFAULT, &finder2);
+ EXPECT_THROW(finder2.find(other_origin, RRType::TXT()), OutOfZone);
}
TEST_F(InMemoryZoneFinderTest, getFileName) {
@@ -1636,52 +1644,6 @@ TEST_F(InMemoryZoneFinderTest, addbadRRsig) {
InMemoryZoneFinder::AddError);
}
-//
-// (Faked) NSEC3 hash data. Arbitrarily borrowed from RFC515 examples.
-//
-// Commonly used NSEC3 suffix. It's incorrect to use it for all NSEC3s, but
-// doesn't matter for the purpose of our tests.
-const char* const nsec3_common = " 300 IN NSEC3 1 1 12 aabbccdd "
- "2T7B4G4VSA5SMI47K61MV5BV1A22BOJR A RRSIG";
-// Likewise, common RRSIG suffix for NSEC3s.
-const char* const nsec3_rrsig_common = " 300 IN RRSIG NSEC3 5 3 3600 "
- "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE";
-
-void
-findNSEC3Check(bool expected_matched, uint8_t expected_labels,
- const string& expected_closest,
- const string& expected_next,
- const ZoneFinder::FindNSEC3Result& result,
- bool expected_sig = false)
-{
- EXPECT_EQ(expected_matched, result.matched);
- // Convert to int so the error messages would be more readable:
- EXPECT_EQ(static_cast<int>(expected_labels),
- static_cast<int>(result.closest_labels));
-
- vector<ConstRRsetPtr> actual_rrsets;
- ASSERT_TRUE(result.closest_proof);
- actual_rrsets.push_back(result.closest_proof);
- if (expected_sig) {
- actual_rrsets.push_back(result.closest_proof->getRRsig());
- }
- rrsetsCheck(expected_closest, actual_rrsets.begin(),
- actual_rrsets.end());
-
- actual_rrsets.clear();
- if (expected_next.empty()) {
- EXPECT_FALSE(result.next_proof);
- } else {
- ASSERT_TRUE(result.next_proof);
- actual_rrsets.push_back(result.next_proof);
- if (expected_sig) {
- actual_rrsets.push_back(result.next_proof->getRRsig());
- }
- rrsetsCheck(expected_next, actual_rrsets.begin(),
- actual_rrsets.end());
- }
-}
-
TEST_F(InMemoryZoneFinderTest, addNSEC3) {
// Set up the faked hash calculator.
setNSEC3HashCreator(&nsec3_hash_creator_);
@@ -1970,73 +1932,7 @@ TEST_F(InMemoryZoneFinderTest, findNSEC3) {
string(nsec3_common);
EXPECT_EQ(result::SUCCESS, zone_finder_.add(textToRRset(zzz_nsec3_text)));
- // Parameter validation: the query name must be in or below the zone
- EXPECT_THROW(zone_finder_.findNSEC3(Name("example.com"), false),
- isc::InvalidParameter);
- EXPECT_THROW(zone_finder_.findNSEC3(Name("org"), true),
- isc::InvalidParameter);
-
- // Apex name. It should have a matching NSEC3.
- {
- SCOPED_TRACE("apex, non recursive mode");
- findNSEC3Check(true, origin_.getLabelCount(), apex_nsec3_text, "",
- zone_finder_.findNSEC3(origin_, false));
- }
-
- // Recursive mode doesn't change the result in this case.
- {
- SCOPED_TRACE("apex, recursive mode");
- findNSEC3Check(true, origin_.getLabelCount(), apex_nsec3_text, "",
- zone_finder_.findNSEC3(origin_, true));
- }
-
- // Non existent name. Disabling recursion, a covering NSEC3 should be
- // returned.
- const Name www_name("www.example.org");
- {
- SCOPED_TRACE("non existent name, non recursive mode");
- findNSEC3Check(false, www_name.getLabelCount(), apex_nsec3_text, "",
- zone_finder_.findNSEC3(www_name, false));
- }
-
- // Non existent name. The closest provable encloser is the apex,
- // and next closer is the query name itself (which NSEC3 for ns1
- // covers)
- // H(ns1) = 2T... < H(xxx) = Q0... < H(zzz) = R5...
- {
- SCOPED_TRACE("non existent name, recursive mode");
- findNSEC3Check(true, origin_.getLabelCount(), apex_nsec3_text,
- ns1_nsec3_text,
- zone_finder_.findNSEC3(Name("xxx.example.org"), true));
- }
-
- // Similar to the previous case, but next closer name is different
- // from the query name. The closet encloser is w.example.org, and
- // next closer is y.w.example.org.
- // H(ns1) = 2T.. < H(y.w) = K8.. < H(zzz) = R5
- {
- SCOPED_TRACE("non existent name, non qname next closer");
- findNSEC3Check(true, Name("w.example.org").getLabelCount(),
- w_nsec3_text, ns1_nsec3_text,
- zone_finder_.findNSEC3(Name("x.y.w.example.org"),
- true));
- }
-
- // In the rest of test we check hash comparison for wrap around cases.
- {
- SCOPED_TRACE("very small hash");
- const Name smallest_name("smallest.example.org");
- findNSEC3Check(false, smallest_name.getLabelCount(),
- zzz_nsec3_text, "",
- zone_finder_.findNSEC3(smallest_name, false));
- }
- {
- SCOPED_TRACE("very large hash");
- const Name largest_name("largest.example.org");
- findNSEC3Check(false, largest_name.getLabelCount(),
- zzz_nsec3_text, "",
- zone_finder_.findNSEC3(largest_name, false));
- }
+ performNSEC3Test(zone_finder_);
}
TEST_F(InMemoryZoneFinderTest, findNSEC3ForBadZone) {
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 4f7a5d8..c36c94b 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -37,15 +37,22 @@ using isc::dns::Name;
namespace {
// Some test data
-std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
-std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
-std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
-std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
-std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
-std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
-std::string SQLITE_DBFILE_MEMORY = ":memory:";
-std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
-std::string SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
+const char* const SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+const char* const SQLITE_DBFILE_EXAMPLE2 =
+ TEST_DATA_DIR "/example2.com.sqlite3";
+const char* const SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+const char* const SQLITE_DBFILE_EXAMPLE_ROOT =
+ TEST_DATA_DIR "/test-root.sqlite3";
+const char* const SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+const char* const SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+const char* const SQLITE_DBFILE_MEMORY = ":memory:";
+const char* const SQLITE_DBFILE_EXAMPLE_ORG =
+ TEST_DATA_DIR "/example.org.sqlite3";
+const char* const SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
+const char* const SQLITE_DBFILE_NEWSCHEMA = TEST_DATA_DIR "/newschema.sqlite3";
+const char* const SQLITE_DBFILE_OLDSCHEMA = TEST_DATA_DIR "/oldschema.sqlite3";
+const char* const SQLITE_DBFILE_NEW_MINOR_SCHEMA =
+ TEST_DATA_DIR "/new_minor_schema.sqlite3";
// The following file must be non existent and must be non"creatable";
// the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -74,6 +81,20 @@ TEST(SQLite3Open, brokenDB) {
SQLite3Error);
}
+// Different schema versions
+TEST(SQLite3Open, differentSchemaVersions) {
+ // If the major version is different from the current one, it should fail.
+ EXPECT_THROW(SQLite3Accessor(SQLITE_DBFILE_NEWSCHEMA, "IN"),
+ IncompatibleDbVersion);
+ EXPECT_THROW(SQLite3Accessor(SQLITE_DBFILE_OLDSCHEMA, "IN"),
+ IncompatibleDbVersion);
+
+ // Difference in the minor version is okay (as of this test written
+ // the current minor version is 0, so we can only test the case with a
+ // higher minor version).
+ EXPECT_NO_THROW(SQLite3Accessor(SQLITE_DBFILE_NEW_MINOR_SCHEMA, "IN"));
+}
+
// Test we can create the schema on the fly
TEST(SQLite3Open, memoryDB) {
EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY, "IN"));
@@ -771,17 +792,27 @@ TEST_F(SQLite3Update, rollback) {
checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
}
-TEST_F(SQLite3Update, rollbackFailure) {
+TEST_F(SQLite3Update, rollbackFailure) {
// This test emulates a rare scenario of making rollback attempt fail.
// The iterator is paused in the middle of getting records, which prevents
// the rollback operation at the end of the test.
+    // Since SQLite3 version 3.7.11, rollbacks no longer fail on pending
+    // transactions, which makes the original check moot; instead, the
+    // operations that follow the rollback fail. Depending on the library
+    // version we therefore test whichever behavior applies and confirm
+    // the resulting error is caught.
string columns[DatabaseAccessor::COLUMN_COUNT];
iterator = accessor->getRecords("example.com.", zone_id);
EXPECT_TRUE(iterator->getNext(columns));
accessor->startUpdateZone("example.com.", true);
+#if SQLITE_VERSION_NUMBER < 3007011
EXPECT_THROW(accessor->rollback(), DataSourceError);
+ EXPECT_NO_THROW(iterator->getNext(columns));
+#else
+ EXPECT_NO_THROW(accessor->rollback());
+ EXPECT_THROW(iterator->getNext(columns), DataSourceError);
+#endif
}
TEST_F(SQLite3Update, commitConflict) {
@@ -1049,7 +1080,7 @@ const char* const diff_end_data[] = {
"1300", DIFF_ADD_TEXT
};
const char* const diff_add_a_data[] = {
- "dns01.example.com.", "A", "3600", "192.0.2.10", "1234", DIFF_ADD_TEXT
+ "dns01.example.com.", "A", "3600", "192.0.2.10", "1300", DIFF_ADD_TEXT
};
// The following two are helper functions to convert textual test data
@@ -1070,8 +1101,19 @@ getOperation(const char* const diff_data[]) {
// diffs.
void
checkDiffs(const vector<const char* const*>& expected,
- const vector<vector<string> >& actual)
+ DatabaseAccessor::IteratorContextPtr rr_iterator)
{
+ vector<vector<string> > actual;
+ string columns_holder[DatabaseAccessor::COLUMN_COUNT];
+ while (rr_iterator->getNext(columns_holder)) {
+ // Reorder the 'actual' vector to be compatible with the expected one.
+ vector<string> columns;
+ columns.push_back(columns_holder[DatabaseAccessor::NAME_COLUMN]);
+ columns.push_back(columns_holder[DatabaseAccessor::TYPE_COLUMN]);
+ columns.push_back(columns_holder[DatabaseAccessor::TTL_COLUMN]);
+ columns.push_back(columns_holder[DatabaseAccessor::RDATA_COLUMN]);
+ actual.push_back(columns);
+ }
EXPECT_EQ(expected.size(), actual.size());
const size_t n_diffs = std::min(expected.size(), actual.size());
for (size_t i = 0; i < n_diffs; ++i) {
@@ -1097,16 +1139,18 @@ TEST_F(SQLite3Update, addRecordDiff) {
getOperation(diff_end_data), diff_params);
// Until the diffs are committed, they are not visible to other accessors.
- EXPECT_TRUE(another_accessor->getRecordDiff(zone_id).empty());
+ EXPECT_THROW(another_accessor->getDiffs(zone_id, 1234, 1300),
+ NoSuchSerial);
accessor->commit();
expected_stored.clear();
expected_stored.push_back(diff_begin_data);
expected_stored.push_back(diff_end_data);
- checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ checkDiffs(expected_stored, accessor->getDiffs(zone_id, 1234, 1300));
// Now it should be visible to others, too.
- checkDiffs(expected_stored, another_accessor->getRecordDiff(zone_id));
+ checkDiffs(expected_stored, another_accessor->getDiffs(zone_id, 1234,
+ 1300));
}
TEST_F(SQLite3Update, addRecordOfLargeSerial) {
@@ -1138,7 +1182,7 @@ TEST_F(SQLite3Update, addRecordOfLargeSerial) {
expected_stored.clear();
expected_stored.push_back(begin_data);
expected_stored.push_back(diff_end_data);
- checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ checkDiffs(expected_stored, accessor->getDiffs(zone_id, 4294967295U, 1300));
}
TEST_F(SQLite3Update, addDiffWithoutUpdate) {
@@ -1183,7 +1227,7 @@ TEST_F(SQLite3Update, addDiffRollback) {
getOperation(diff_begin_data), diff_params);
accessor->rollback();
- EXPECT_TRUE(accessor->getRecordDiff(zone_id).empty());
+ EXPECT_THROW(accessor->getDiffs(zone_id, 1234, 1234), NoSuchSerial);
}
TEST_F(SQLite3Update, addDiffInBadOrder) {
@@ -1195,19 +1239,23 @@ TEST_F(SQLite3Update, addDiffInBadOrder) {
copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
diff_params);
accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
- getOperation(diff_end_data), diff_params);
+ static_cast<DatabaseAccessor::DiffOperation>(
+ lexical_cast<int>(DIFF_DELETE_TEXT)),
+ diff_params);
copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
diff_params);
accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
- getOperation(diff_begin_data), diff_params);
+ static_cast<DatabaseAccessor::DiffOperation>(
+ lexical_cast<int>(DIFF_ADD_TEXT)),
+ diff_params);
accessor->commit();
expected_stored.clear();
expected_stored.push_back(diff_end_data);
expected_stored.push_back(diff_begin_data);
- checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+ checkDiffs(expected_stored, accessor->getDiffs(zone_id, 1300, 1234));
}
TEST_F(SQLite3Update, addDiffWithUpdate) {
@@ -1277,19 +1325,6 @@ TEST_F(SQLite3Update, addDiffWithUpdate) {
expected_stored.push_back(diff_end_data);
expected_stored.push_back(diff_add_a_data);
- checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
-}
-
-TEST_F(SQLite3Update, addDiffWithNoTable) {
- // An attempt of adding diffs to an old version of database that doesn't
- // have a diffs table. This will fail in preparing the statement.
- initAccessor(SQLITE_DBFILE_EXAMPLE + ".nodiffs", "IN");
- zone_id = accessor->startUpdateZone("example.com.", false).second;
- copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
- diff_params);
- EXPECT_THROW(accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
- getOperation(diff_begin_data),
- diff_params),
- SQLite3Error);
+ checkDiffs(expected_stored, accessor->getDiffs(zone_id, 1234, 1300));
}
} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/sqlite3_unittest.cc b/src/lib/datasrc/tests/sqlite3_unittest.cc
index ce9413d..ac1211b 100644
--- a/src/lib/datasrc/tests/sqlite3_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_unittest.cc
@@ -51,6 +51,10 @@ ConstElementPtr SQLITE_DBFILE_BROKENDB = Element::fromJSON(
"{ \"database_file\": \"" TEST_DATA_DIR "/brokendb.sqlite3\"}");
ConstElementPtr SQLITE_DBFILE_MEMORY = Element::fromJSON(
"{ \"database_file\": \":memory:\"}");
+ConstElementPtr SQLITE_DBFILE_NEWSCHEMA = Element::fromJSON(
+ "{ \"database_file\": \"" TEST_DATA_DIR "/newschema.sqlite3\"}");
+ConstElementPtr SQLITE_DBFILE_OLDSCHEMA = Element::fromJSON(
+ "{ \"database_file\": \"" TEST_DATA_DIR "/oldschema.sqlite3\"}");
// The following file must be non existent and must be non"creatable";
// the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -403,6 +407,17 @@ TEST_F(Sqlite3DataSourceTest, openBrokenDB) {
EXPECT_EQ(DataSrc::SUCCESS, data_source.init(SQLITE_DBFILE_EXAMPLE));
}
+// Different schema versions, see sqlite3_accessor_unittest.
+TEST_F(Sqlite3DataSourceTest, differentSchemaVersions) {
+ EXPECT_EQ(DataSrc::SUCCESS, data_source.close());
+ EXPECT_THROW(data_source.init(SQLITE_DBFILE_NEWSCHEMA),
+ IncompatibleDbVersion);
+ EXPECT_THROW(data_source.init(SQLITE_DBFILE_OLDSCHEMA),
+ IncompatibleDbVersion);
+ // Don't bother to test the new_minor case; we should retire this stuff
+ // before it really happens.
+}
+
// This test only confirms that on-the-fly schema creation works.
TEST_F(Sqlite3DataSourceTest, memoryDB) {
EXPECT_EQ(DataSrc::SUCCESS, data_source.close());
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index 6d6bd74..2a19ecb 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -66,6 +66,7 @@ protected:
authors_data.push_back("Kazunori Fujiwara");
authors_data.push_back("Michael Graff");
authors_data.push_back("Michal Vaner");
+ authors_data.push_back("Mukund Sivaraman");
authors_data.push_back("Naoki Kambe");
authors_data.push_back("Shane Kerr");
authors_data.push_back("Shen Tingting");
diff --git a/src/lib/datasrc/tests/test_client.cc b/src/lib/datasrc/tests/test_client.cc
new file mode 100644
index 0000000..3974371
--- /dev/null
+++ b/src/lib/datasrc/tests/test_client.cc
@@ -0,0 +1,92 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <exceptions/exceptions.h>
+
+#include <dns/masterload.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+
+#include <datasrc/client.h>
+#include <datasrc/zone.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "test_client.h"
+
+#include <boost/bind.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <cstdlib>
+#include <istream>
+#include <fstream>
+
+using namespace std;
+using boost::shared_ptr;
+
+using namespace isc::dns;
+
+namespace isc {
+namespace datasrc {
+namespace unittest {
+
+namespace {
+// A helper subroutine for the SQLite3Client creator.
+void
+addRRset(ZoneUpdaterPtr updater, ConstRRsetPtr rrset) {
+ updater->addRRset(*rrset);
+}
+}
+
+shared_ptr<DataSourceClient>
+createSQLite3Client(RRClass zclass, const Name& zname,
+ const char* const db_file, const char* const zone_file)
+{
+ ifstream ifs(zone_file, ios_base::in);
+ if (ifs.fail()) {
+ isc_throw(isc::Unexpected, "Failed to open test zone file: "
+ << zone_file);
+ }
+ return (createSQLite3Client(zclass, zname, db_file, ifs));
+}
+
+shared_ptr<DataSourceClient>
+createSQLite3Client(RRClass zclass, const Name& zname,
+ const char* const db_file, istream& rr_stream)
+{
+ // We always begin with an empty template SQLite3 DB file and install
+ // the zone data from the zone file to ensure both cases have the
+ // same test data.
+ const char* const install_cmd_prefix = INSTALL_PROG " " TEST_DATA_COMMONDIR
+ "/rwtest.sqlite3 ";
+ const string install_cmd = string(install_cmd_prefix) + db_file;
+ if (system(install_cmd.c_str()) != 0) {
+ isc_throw(isc::Unexpected,
+ "Error setting up; command failed: " << install_cmd);
+ }
+
+ shared_ptr<SQLite3Accessor> accessor(
+ new SQLite3Accessor(db_file, zclass.toText()));
+ shared_ptr<DatabaseClient> client(new DatabaseClient(zclass, accessor));
+
+ ZoneUpdaterPtr updater = client->getUpdater(zname, true);
+ masterLoad(rr_stream, zname, zclass, boost::bind(addRRset, updater, _1));
+
+ updater->commit();
+
+ return (client);
+}
+
+}
+}
+}
diff --git a/src/lib/datasrc/tests/test_client.h b/src/lib/datasrc/tests/test_client.h
new file mode 100644
index 0000000..2c692d3
--- /dev/null
+++ b/src/lib/datasrc/tests/test_client.h
@@ -0,0 +1,71 @@
+// Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __TEST_DATA_SOURCE_CLIENT_H
+#define __TEST_DATA_SOURCE_CLIENT_H 1
+
+#include <dns/name.h>
+#include <dns/rrclass.h>
+
+#include <boost/shared_ptr.hpp>
+
+#include <istream>
+
+namespace isc {
+namespace datasrc {
+namespace unittest {
+
+// Here we define utility modules for the convenience of tests that create
+// a data source client according to the specified conditions.
+
+/// \brief Create an SQLite3 data source client from a zone file.
+///
+/// This function creates an SQLite3 client for the specified zone containing
+/// RRs in the specified zone file. The zone will be created in the given
+/// SQLite3 database file. The database file does not have to exist; this
+/// function will automatically create a new file for the test. If the given
+/// file already exists this function overwrites its content (so the file
+/// should be an ephemeral one used only for that test case).
+///
+/// The zone file must be formatted so it's accepted by the dns::masterLoad()
+/// function.
+///
+/// \param zclass The RR class of the zone
+/// \param zname The origin name of the zone
+/// \param db_file The SQLite3 data base file in which the zone data should be
+/// installed.
+/// \param zone_file The filename of the zone data in the textual format.
+/// \return Newly created \c DataSourceClient using the SQLite3 data source
+boost::shared_ptr<DataSourceClient>
+createSQLite3Client(dns::RRClass zclass, const dns::Name& zname,
+ const char* const db_file, const char* const zone_file);
+
+/// \brief Create an SQLite3 data source client from a stream.
+///
+/// This is similar to the other version of the function, but takes an input
+/// stream for the zone data. The stream must produce text in the format that
+/// the corresponding dns::masterLoad() function expects.
+boost::shared_ptr<DataSourceClient>
+createSQLite3Client(dns::RRClass zclass, const dns::Name& zname,
+ const char* const db_file, std::istream& rr_stream);
+
+} // end of unittest
+} // end of datasrc
+} // end of isc
+
+#endif // __TEST_DATA_SOURCE_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
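
For reference, a minimal usage sketch of the helper declared above (illustration only, not part of this commit; the database file name below is a placeholder, while contexttest.zone and the TEST_DATA_* macros appear elsewhere in this patch):

    #include "test_client.h"

    #include <datasrc/client.h>
    #include <dns/name.h>
    #include <dns/rrclass.h>

    #include <boost/shared_ptr.hpp>

    // Hypothetical helper, not part of this commit.
    boost::shared_ptr<isc::datasrc::DataSourceClient>
    makeExampleOrgClient() {
        // Build a throw-away SQLite3 database for example.org from a textual
        // zone file; the template DB is copied over the target file each time.
        return (isc::datasrc::unittest::createSQLite3Client(
            isc::dns::RRClass::IN(), isc::dns::Name("example.org"),
            TEST_DATA_BUILDDIR "/example.org.copied.sqlite3",
            TEST_DATA_DIR "/contexttest.zone"));
    }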
diff --git a/src/lib/datasrc/tests/testdata/diffs.sqlite3 b/src/lib/datasrc/tests/testdata/diffs.sqlite3
index 3820563..4cf8fb7 100644
Binary files a/src/lib/datasrc/tests/testdata/diffs.sqlite3 and b/src/lib/datasrc/tests/testdata/diffs.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example.org.sqlite3 b/src/lib/datasrc/tests/testdata/example.org.sqlite3
index 60e6e05..c7388ff 100644
Binary files a/src/lib/datasrc/tests/testdata/example.org.sqlite3 and b/src/lib/datasrc/tests/testdata/example.org.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 b/src/lib/datasrc/tests/testdata/example2.com.sqlite3
index 9da7d0e..a0a576c 100644
Binary files a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 and b/src/lib/datasrc/tests/testdata/example2.com.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/new_minor_schema.sqlite3 b/src/lib/datasrc/tests/testdata/new_minor_schema.sqlite3
new file mode 100644
index 0000000..1542c20
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/new_minor_schema.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/newschema.sqlite3 b/src/lib/datasrc/tests/testdata/newschema.sqlite3
new file mode 100644
index 0000000..460cfa8
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/newschema.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/oldschema.sqlite3 b/src/lib/datasrc/tests/testdata/oldschema.sqlite3
new file mode 100644
index 0000000..b44c5eb
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/oldschema.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
deleted file mode 100644
index ccbb884..0000000
Binary files a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 and /dev/null differ
diff --git a/src/lib/datasrc/tests/testdata/test-root.sqlite3 b/src/lib/datasrc/tests/testdata/test-root.sqlite3
index c1dae47..1bef761 100644
Binary files a/src/lib/datasrc/tests/testdata/test-root.sqlite3 and b/src/lib/datasrc/tests/testdata/test-root.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3 b/src/lib/datasrc/tests/testdata/test.sqlite3
index 521cf31..9c71cb5 100644
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3 and b/src/lib/datasrc/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
deleted file mode 100644
index cc8cfc3..0000000
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs and /dev/null differ
diff --git a/src/lib/datasrc/tests/zone_finder_context_unittest.cc b/src/lib/datasrc/tests/zone_finder_context_unittest.cc
index cb48e7e..50d409e 100644
--- a/src/lib/datasrc/tests/zone_finder_context_unittest.cc
+++ b/src/lib/datasrc/tests/zone_finder_context_unittest.cc
@@ -12,6 +12,8 @@
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
+#include <exceptions/exceptions.h>
+
#include <dns/masterload.h>
#include <dns/name.h>
#include <dns/rrclass.h>
@@ -21,6 +23,7 @@
#include <datasrc/database.h>
#include <datasrc/sqlite3_accessor.h>
+#include "test_client.h"
#include <testutils/dnsmessage_test.h>
#include <gtest/gtest.h>
@@ -29,7 +32,8 @@
#include <boost/foreach.hpp>
#include <boost/shared_ptr.hpp>
-#include <cstdlib>
+#include <fstream>
+#include <sstream>
#include <vector>
using namespace std;
@@ -66,8 +70,6 @@ createInMemoryClient(RRClass zclass, const Name& zname) {
return (client);
}
-// Creator for the SQLite3 client to be tested. addRRset() is a helper
-// subroutine.
void
addRRset(ZoneUpdaterPtr updater, ConstRRsetPtr rrset) {
updater->addRRset(*rrset);
@@ -78,25 +80,14 @@ createSQLite3Client(RRClass zclass, const Name& zname) {
// We always begin with an empty template SQLite3 DB file and install
// the zone data from the zone file to ensure both cases have the
// same test data.
+ DataSourceClientPtr client = unittest::createSQLite3Client(
+ zclass, zname, TEST_DATA_BUILDDIR "/contexttest.sqlite3.copied",
+ TEST_ZONE_FILE);
- const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
- "/rwtest.sqlite3 " TEST_DATA_BUILDDIR "/contexttest.sqlite3.copied";
- if (system(install_cmd) != 0) {
- isc_throw(isc::Unexpected,
- "Error setting up; command failed: " << install_cmd);
- }
-
- shared_ptr<SQLite3Accessor> accessor(
- new SQLite3Accessor(TEST_DATA_BUILDDIR "/contexttest.sqlite3.copied",
- zclass.toText()));
- shared_ptr<DatabaseClient> client(new DatabaseClient(zclass, accessor));
-
- ZoneUpdaterPtr updater = client->getUpdater(zname, true);
- masterLoad(TEST_ZONE_FILE, zname, zclass, boost::bind(addRRset, updater,
- _1));
// Insert an out-of-zone name to test if it's incorrectly returned.
// Note that neither updater nor SQLite3 accessor checks this condition,
// so this should succeed.
+ ZoneUpdaterPtr updater = client->getUpdater(zname, false);
stringstream ss("ns.example.com. 3600 IN A 192.0.2.7");
masterLoad(ss, Name::ROOT_NAME(), zclass,
boost::bind(addRRset, updater, _1));
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index c705279..c68a01c 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -27,6 +27,16 @@
namespace isc {
namespace datasrc {
+/// \brief Out of zone exception
+///
+/// This is thrown when a method is called for a name or RRset which
+/// is not in or below the zone.
+class OutOfZone : public Exception {
+public:
+ OutOfZone(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
/// \brief The base class to search a zone for RRsets
///
/// The \c ZoneFinder class is an abstract base class for representing
@@ -466,6 +476,8 @@ public:
///
/// \exception std::bad_alloc Memory allocation such as for constructing
/// the resulting RRset fails
+ /// \throw OutOfZone The Name \c name is outside of the origin of the
+ /// zone of this ZoneFinder.
/// \exception DataSourceError Derived class specific exception, e.g.
/// when encountering a bad zone configuration or database connection
/// failure. Although these are considered rare, exceptional events,
@@ -589,7 +601,7 @@ public:
/// algorithm, and salt) from the zone as noted above. If these
/// assumptions aren't met, \c DataSourceError exception will be thrown.
///
- /// \exception InvalidParameter name is not a subdomain of the zone origin
+ /// \exception OutOfZone name is not a subdomain of the zone origin
/// \exception DataSourceError Low-level or internal datasource errors
/// happened, or the zone isn't properly signed with NSEC3
/// (NSEC3 parameters cannot be found, no NSEC3s are available, etc).
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 010c027..39f4429 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -88,7 +88,7 @@ BUILT_SOURCES += rdataclass.h rdataclass.cc
lib_LTLIBRARIES = libdns++.la
-libdns___la_LDFLAGS = -no-undefined -version-info 1:0:1
+libdns___la_LDFLAGS = -no-undefined -version-info 2:0:0
libdns___la_SOURCES =
libdns___la_SOURCES += edns.h edns.cc
diff --git a/src/lib/dns/name.cc b/src/lib/dns/name.cc
index b56efc4..d642e97 100644
--- a/src/lib/dns/name.cc
+++ b/src/lib/dns/name.cc
@@ -169,7 +169,8 @@ Name::Name(const std::string &namestring, bool downcase) {
//
if (c == '.') {
if (s != send) {
- isc_throw(EmptyLabel, "non terminating empty label");
+ isc_throw(EmptyLabel,
+ "non terminating empty label in " << namestring);
}
is_root = true;
} else if (c == '@' && s == send) {
@@ -197,7 +198,8 @@ Name::Name(const std::string &namestring, bool downcase) {
case ft_ordinary:
if (c == '.') {
if (count == 0) {
- isc_throw(EmptyLabel, "duplicate period");
+ isc_throw(EmptyLabel,
+ "duplicate period in " << namestring);
}
ndata.at(offsets.back()) = count;
offsets.push_back(ndata.size());
@@ -210,7 +212,8 @@ Name::Name(const std::string &namestring, bool downcase) {
state = ft_escape;
} else {
if (++count > MAX_LABELLEN) {
- isc_throw(TooLongLabel, "label is too long");
+ isc_throw(TooLongLabel,
+ "label is too long in " << namestring);
}
ndata.push_back(downcase ? maptolower[c] : c);
}
@@ -219,14 +222,16 @@ Name::Name(const std::string &namestring, bool downcase) {
if (c == '[') {
// This looks like a bitstring label, which was deprecated.
// Intentionally drop it.
- isc_throw(BadLabelType, "invalid label type");
+ isc_throw(BadLabelType,
+ "invalid label type in " << namestring);
}
state = ft_escape;
// FALLTHROUGH
case ft_escape:
if (!isdigit(c & 0xff)) {
if (++count > MAX_LABELLEN) {
- isc_throw(TooLongLabel, "label is too long");
+ isc_throw(TooLongLabel,
+ "label is too long in " << namestring);
}
ndata.push_back(downcase ? maptolower[c] : c);
state = ft_ordinary;
@@ -238,17 +243,22 @@ Name::Name(const std::string &namestring, bool downcase) {
// FALLTHROUGH
case ft_escdecimal:
if (!isdigit(c & 0xff)) {
- isc_throw(BadEscape, "mixture of escaped digit and non-digit");
+ isc_throw(BadEscape,
+ "mixture of escaped digit and non-digit in "
+ << namestring);
}
value *= 10;
value += digitvalue[c];
digits++;
if (digits == 3) {
if (value > 255) {
- isc_throw(BadEscape, "escaped decimal is too large");
+ isc_throw(BadEscape,
+ "escaped decimal is too large in "
+ << namestring);
}
if (++count > MAX_LABELLEN) {
- isc_throw(TooLongLabel, "label is too long");
+ isc_throw(TooLongLabel,
+ "label is too long in " << namestring);
}
ndata.push_back(downcase ? maptolower[value] : value);
state = ft_ordinary;
@@ -262,11 +272,14 @@ Name::Name(const std::string &namestring, bool downcase) {
if (!done) { // no trailing '.' was found.
if (ndata.size() == Name::MAX_WIRE) {
- isc_throw(TooLongName, "name is too long for termination");
+ isc_throw(TooLongName,
+ "name is too long for termination in " << namestring);
}
assert(s == send);
if (state != ft_ordinary && state != ft_at) {
- isc_throw(IncompleteName, "incomplete textual name");
+ isc_throw(IncompleteName,
+ "incomplete textual name in " <<
+ (namestring.empty() ? "<empty>" : namestring));
}
if (state == ft_ordinary) {
assert(count != 0);
diff --git a/src/lib/dns/name.h b/src/lib/dns/name.h
index ca64d69..ef32f90 100644
--- a/src/lib/dns/name.h
+++ b/src/lib/dns/name.h
@@ -32,33 +32,42 @@ namespace dns {
class AbstractMessageRenderer;
///
+/// \brief Base class for name parser exceptions.
+///
+class NameParserException : public Exception {
+public:
+ NameParserException(const char* file, size_t line, const char* what) :
+ isc::Exception(file, line, what) {}
+};
+
+///
/// \brief A standard DNS module exception that is thrown if the name parser
/// encounters an empty label in the middle of a name.
///
-class EmptyLabel : public Exception {
+class EmptyLabel : public NameParserException {
public:
EmptyLabel(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
/// \brief A standard DNS module exception that is thrown if the name parser
/// encounters too long a name.
///
-class TooLongName : public Exception {
+class TooLongName : public NameParserException {
public:
TooLongName(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
/// \brief A standard DNS module exception that is thrown if the name parser
/// encounters too long a label.
///
-class TooLongLabel : public Exception {
+class TooLongLabel : public NameParserException {
public:
TooLongLabel(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
@@ -67,20 +76,20 @@ public:
/// applies to bitstring labels, which would begin with "\[". Incomplete cases
/// include an incomplete escaped sequence such as "\12".
///
-class BadLabelType : public Exception {
+class BadLabelType : public NameParserException {
public:
BadLabelType(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
/// \brief A standard DNS module exception that is thrown if the name parser
/// fails to decode a "\"-escaped sequence.
///
-class BadEscape : public Exception {
+class BadEscape : public NameParserException {
public:
BadEscape(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
@@ -90,10 +99,10 @@ public:
/// An attempt of constructing a name from an empty string will trigger this
/// exception.
///
-class IncompleteName : public Exception {
+class IncompleteName : public NameParserException {
public:
IncompleteName(const char* file, size_t line, const char* what) :
- isc::Exception(file, line, what) {}
+ NameParserException(file, line, what) {}
};
///
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index e85f82c..ee1097e 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -58,6 +58,7 @@ MyType::toWire(AbstractMessageRenderer& renderer) const {
int
MyType::compare(const Rdata& other) const {
// The compare method normally begins with this dynamic cast.
+ // cppcheck-suppress unreadVariable
const MyType& other_mytype = dynamic_cast<const MyType&>(other);
// ...
}
diff --git a/src/lib/dns/tests/name_unittest.cc b/src/lib/dns/tests/name_unittest.cc
index c5f3b7f..c327bdc 100644
--- a/src/lib/dns/tests/name_unittest.cc
+++ b/src/lib/dns/tests/name_unittest.cc
@@ -130,6 +130,15 @@ TEST_F(NameTest, nonlocalObject) {
EXPECT_EQ("\\255.example.com.", downcased_global.toText());
}
+template <typename ExceptionType>
+void
+checkBadTextName(const string& txt) {
+ // Check it results in the specified type of exception as well as
+ // NameParserException.
+ EXPECT_THROW(Name(txt, false), ExceptionType);
+ EXPECT_THROW(Name(txt, false), NameParserException);
+}
+
TEST_F(NameTest, fromText) {
vector<string> strnames;
strnames.push_back("www.example.com");
@@ -151,45 +160,46 @@ TEST_F(NameTest, fromText) {
EXPECT_EQ(Name("Www.eXample.coM", true).toText(), example_name.toText());
//
- // Tests for bogus names. These should trigger an exception.
+ // Tests for bogus names. These should trigger exceptions.
//
// empty label cannot be followed by another label
- EXPECT_THROW(Name(".a"), EmptyLabel);
+ checkBadTextName<EmptyLabel>(".a");
// duplicate period
- EXPECT_THROW(Name("a.."), EmptyLabel);
+ checkBadTextName<EmptyLabel>("a..");
// label length must be < 64
- EXPECT_THROW(Name("012345678901234567890123456789"
- "012345678901234567890123456789"
- "0123"), TooLongLabel);
+ checkBadTextName<TooLongLabel>("012345678901234567890123456789"
+ "012345678901234567890123456789"
+ "0123");
// now-unsupported bitstring labels
- EXPECT_THROW(Name("\\[b11010000011101]"), BadLabelType);
+ checkBadTextName<BadLabelType>("\\[b11010000011101]");
// label length must be < 64
- EXPECT_THROW(Name("012345678901234567890123456789"
- "012345678901234567890123456789"
- "012\\x"), TooLongLabel);
+ checkBadTextName<TooLongLabel>("012345678901234567890123456789"
+ "012345678901234567890123456789"
+ "012\\x");
// but okay as long as resulting len < 64 even if the original string is
// "too long"
EXPECT_NO_THROW(Name("012345678901234567890123456789"
"012345678901234567890123456789"
"01\\x"));
// incomplete \DDD pattern (exactly 3 D's must appear)
- EXPECT_THROW(Name("\\12abc"), BadEscape);
+ checkBadTextName<BadEscape>("\\12abc");
// \DDD must not exceed 255
- EXPECT_THROW(Name("\\256"), BadEscape);
+ checkBadTextName<BadEscape>("\\256");
// Same tests for \111 as for \\x above
- EXPECT_THROW(Name("012345678901234567890123456789"
- "012345678901234567890123456789"
- "012\\111"), TooLongLabel);
+ checkBadTextName<TooLongLabel>("012345678901234567890123456789"
+ "012345678901234567890123456789"
+ "012\\111");
EXPECT_NO_THROW(Name("012345678901234567890123456789"
"012345678901234567890123456789"
"01\\111"));
// A domain name must be 255 octets or less
- EXPECT_THROW(Name("123456789.123456789.123456789.123456789.123456789."
- "123456789.123456789.123456789.123456789.123456789."
- "123456789.123456789.123456789.123456789.123456789."
- "123456789.123456789.123456789.123456789.123456789."
- "123456789.123456789.123456789.123456789.123456789."
- "1234"), TooLongName);
+ checkBadTextName<TooLongName>("123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789."
+ "123456789.123456789.123456789.123456789."
+ "123456789.1234");
// This is a possible longest name and should be accepted
EXPECT_NO_THROW(Name("123456789.123456789.123456789.123456789.123456789."
"123456789.123456789.123456789.123456789.123456789."
@@ -198,7 +208,7 @@ TEST_F(NameTest, fromText) {
"123456789.123456789.123456789.123456789.123456789."
"123"));
// \DDD must consist of 3 digits.
- EXPECT_THROW(Name("\\12"), IncompleteName);
+ checkBadTextName<IncompleteName>("\\12");
// a name with the max number of labels. should be constructed without
// an error, and its length should be the max value.
@@ -565,6 +575,7 @@ TEST_F(NameTest, lthan) {
EXPECT_TRUE(small_name < large_name);
EXPECT_FALSE(small_name.lthan(small_name));
+ // cppcheck-suppress duplicateExpression
EXPECT_FALSE(small_name < small_name);
EXPECT_FALSE(large_name.lthan(small_name));
@@ -576,6 +587,7 @@ TEST_F(NameTest, gthan) {
EXPECT_TRUE(large_name > small_name);
EXPECT_FALSE(large_name.gthan(large_name));
+ // cppcheck-suppress duplicateExpression
EXPECT_FALSE(large_name > large_name);
EXPECT_FALSE(small_name.gthan(large_name));
diff --git a/src/lib/dns/tests/rdata_dhcid_unittest.cc b/src/lib/dns/tests/rdata_dhcid_unittest.cc
index 9df7043..38b1459 100644
--- a/src/lib/dns/tests/rdata_dhcid_unittest.cc
+++ b/src/lib/dns/tests/rdata_dhcid_unittest.cc
@@ -93,6 +93,7 @@ TEST_F(Rdata_DHCID_Test, getDHCIDDigest) {
TEST_F(Rdata_DHCID_Test, compare) {
// trivial case: self equivalence
+ // cppcheck-suppress uselessCallsCompare
EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid));
in::DHCID rdata_dhcid1("0YLQvtC/0L7Qu9GPINC00LLQsCDRgNGD0LHQu9GP");
diff --git a/src/lib/dns/tests/rdata_in_a_unittest.cc b/src/lib/dns/tests/rdata_in_a_unittest.cc
index af4369d..2fea9a3 100644
--- a/src/lib/dns/tests/rdata_in_a_unittest.cc
+++ b/src/lib/dns/tests/rdata_in_a_unittest.cc
@@ -95,6 +95,7 @@ TEST_F(Rdata_IN_A_Test, compare) {
in::A large2("4.3.2.1");
// trivial case: self equivalence
+ // cppcheck-suppress uselessCallsCompare
EXPECT_EQ(0, small1.compare(small1));
// confirm these are compared as unsigned values
diff --git a/src/lib/dns/tests/rdata_in_aaaa_unittest.cc b/src/lib/dns/tests/rdata_in_aaaa_unittest.cc
index c3e1e16..d8ed1d6 100644
--- a/src/lib/dns/tests/rdata_in_aaaa_unittest.cc
+++ b/src/lib/dns/tests/rdata_in_aaaa_unittest.cc
@@ -91,6 +91,7 @@ TEST_F(Rdata_IN_AAAA_Test, compare) {
in::AAAA large2("8:7:6:5:4:3:2:1");
// trivial case: self equivalence
+ // cppcheck-suppress uselessCallsCompare
EXPECT_EQ(0, small1.compare(small1));
// confirm these are compared as unsigned values
diff --git a/src/lib/dns/tests/rdata_mx_unittest.cc b/src/lib/dns/tests/rdata_mx_unittest.cc
index 36814ea..7dc774d 100644
--- a/src/lib/dns/tests/rdata_mx_unittest.cc
+++ b/src/lib/dns/tests/rdata_mx_unittest.cc
@@ -101,6 +101,7 @@ TEST_F(Rdata_MX_Test, compare) {
generic::MX large2(256, Name("mx.example.com"));
// trivial case: self equivalence
+ // cppcheck-suppress uselessCallsCompare
EXPECT_EQ(0, small1.compare(small1));
// confirm these are compared as unsigned values
diff --git a/src/lib/dns/tests/rdata_unittest.cc b/src/lib/dns/tests/rdata_unittest.cc
index 5be06e8..bf1f5f7 100644
--- a/src/lib/dns/tests/rdata_unittest.cc
+++ b/src/lib/dns/tests/rdata_unittest.cc
@@ -250,6 +250,7 @@ TEST_F(Rdata_Unknown_Test, toWireRenderer) {
TEST_F(Rdata_Unknown_Test, compare) {
// comparison as left-justified unsigned octet sequences:
+ // cppcheck-suppress uselessCallsCompare
EXPECT_EQ(0, rdata_unknown.compare(rdata_unknown));
generic::Generic rdata_unknown_small("\\# 4 00b2c3ff");
diff --git a/src/lib/dns/tests/rrttl_unittest.cc b/src/lib/dns/tests/rrttl_unittest.cc
index 703616c..0e3ab44 100644
--- a/src/lib/dns/tests/rrttl_unittest.cc
+++ b/src/lib/dns/tests/rrttl_unittest.cc
@@ -161,6 +161,7 @@ TEST_F(RRTTLTest, lthan) {
EXPECT_TRUE(ttl_small < ttl_large);
EXPECT_FALSE(ttl_small.lthan(ttl_small));
+ // cppcheck-suppress duplicateExpression
EXPECT_FALSE(ttl_small < ttl_small);
EXPECT_FALSE(ttl_large.lthan(ttl_small));
@@ -172,6 +173,7 @@ TEST_F(RRTTLTest, gthan) {
EXPECT_TRUE(ttl_large > ttl_small);
EXPECT_FALSE(ttl_large.gthan(ttl_large));
+ // cppcheck-suppress duplicateExpression
EXPECT_FALSE(ttl_large > ttl_large);
EXPECT_FALSE(ttl_small.gthan(ttl_large));
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 893bb8c..e3ae4b5 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -3,7 +3,7 @@ SUBDIRS = isc
nodist_python_PYTHON = bind10_config.py
pythondir = $(pyexecdir)
-CLEANFILES = bind10_config.pyc
+CLEANFILES = bind10_config.pyc bind10_config.pyo
CLEANDIRS = __pycache__
clean-local:
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
index 7ee3023..d225bee 100644
--- a/src/lib/python/isc/acl/tests/dns_test.py
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -321,7 +321,7 @@ class RequestACLTest(unittest.TestCase):
' "from": "192.0.2.0/24"},' +
' {"action": "DROP",' +
' "from": "2001:db8::1"},' +
- '] }')
+ ']')
self.assertEqual(ACCEPT, acl.execute(CONTEXT4))
self.assertEqual(REJECT, acl.execute(get_context('192.0.2.2')))
self.assertEqual(DROP, acl.execute(get_context('2001:db8::1')))
diff --git a/src/lib/python/isc/cc/session.py b/src/lib/python/isc/cc/session.py
index f6b6265..33a47bd 100644
--- a/src/lib/python/isc/cc/session.py
+++ b/src/lib/python/isc/cc/session.py
@@ -72,7 +72,7 @@ class Session:
self._lname = None
self._closed = True
- def sendmsg(self, env, msg = None):
+ def sendmsg(self, env, msg=None):
with self._lock:
if self._closed:
raise SessionError("Session has been closed.")
@@ -82,15 +82,24 @@ class Session:
raise ProtocolError("Envelope too large")
if type(msg) == dict:
msg = isc.cc.message.to_wire(msg)
- self._socket.setblocking(1)
length = 2 + len(env);
- if msg:
+ if msg is not None:
length += len(msg)
- self._socket.send(struct.pack("!I", length))
- self._socket.send(struct.pack("!H", len(env)))
- self._socket.send(env)
- if msg:
- self._socket.send(msg)
+
+ # Build entire message.
+ data = struct.pack("!I", length)
+ data += struct.pack("!H", len(env))
+ data += env
+ if msg is not None:
+ data += msg
+
+ # Send it in blocking mode. On some systems send() may
+ # actually send only part of the data, so we need to repeat it
+ # until all of the data has been sent out.
+ self._socket.setblocking(1)
+ while len(data) > 0:
+ cc = self._socket.send(data)
+ data = data[cc:]
def recvmsg(self, nonblock = True, seq = None):
"""Reads a message. If nonblock is true, and there is no
diff --git a/src/lib/python/isc/cc/tests/session_test.py b/src/lib/python/isc/cc/tests/session_test.py
index 772ed0c..e589085 100644
--- a/src/lib/python/isc/cc/tests/session_test.py
+++ b/src/lib/python/isc/cc/tests/session_test.py
@@ -29,6 +29,7 @@ class MySocket():
self.recvqueue = bytearray()
self.sendqueue = bytearray()
self._blocking = True
+ self.send_limit = None
def connect(self, to):
pass
@@ -40,7 +41,14 @@ class MySocket():
self._blocking = val
def send(self, data):
- self.sendqueue.extend(data);
+ # If the upper limit is specified, only "send" up to the specified
+ # limit
+ if self.send_limit is not None and len(data) > self.send_limit:
+ self.sendqueue.extend(data[0:self.send_limit])
+ return self.send_limit
+ else:
+ self.sendqueue.extend(data)
+ return len(data)
def readsent(self, length):
if length > len(self.sendqueue):
@@ -101,6 +109,17 @@ class MySocket():
def gettimeout(self):
return 0
+ def set_send_limit(self, limit):
+ '''Specify the upper limit on the amount of data transmittable at once.
+
+ By default, the send() method of this class "sends" all given data.
+ If this method is called and its parameter is not None,
+ subsequent calls to send() will only transmit the specified amount
+ of data. This can be used to emulate the situation where send()
+ on a real socket object results in a partial write.
+ '''
+ self.send_limit = limit
+
#
# We subclass the Session class we're testing here, only
# to override the __init__() method, which wants a socket,
@@ -157,6 +176,16 @@ class testSession(unittest.TestCase):
#print(sent)
#self.assertRaises(SessionError, sess.sendmsg, {}, {"hello": "a"})
+ def test_session_sendmsg_shortwrite(self):
+ sess = MySession()
+ # Specify the upper limit of the size that can be transmitted at
+ # a single send() call on the faked socket (10 is an arbitrary choice,
+ # just reasonably small).
+ sess._socket.set_send_limit(10)
+ sess.sendmsg({'to': 'someone', 'reply': 1}, {"hello": "a"})
+ # The complete message should still have been transmitted in the end.
+ sent = sess._socket.readsentmsg()
+
def recv_and_compare(self, session, bytes, env, msg):
"""Adds bytes to the recvqueue (which will be read by the
session object, and compare the resulting env and msg to
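
The MySocket.set_send_limit() hook above is what forces the new loop in sendmsg() to iterate: the fake socket accepts at most send_limit bytes per call. The same idea in isolation, with a purely illustrative fake socket:

    class ChunkySocket:
        '''Fake socket whose send() accepts at most `limit` bytes per call.'''
        def __init__(self, limit):
            self.limit = limit
            self.sent = bytearray()

        def send(self, data):
            chunk = data[:self.limit]
            self.sent.extend(chunk)
            return len(chunk)

    s = ChunkySocket(10)
    payload = b"x" * 35
    while payload:
        payload = payload[s.send(payload):]
    assert bytes(s.sent) == b"x" * 35
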
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index ef696fb..cda8b57 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyo
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index dd97827..9f9ce68 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -148,6 +148,27 @@ class ConfigManagerData:
# Ok if we really can't delete it anymore, leave it
pass
+ def rename_config_file(self, old_file_name=None, new_file_name=None):
+ """Renames the given configuration file to the given new file name,
+ if it exists. If it does not exist, nothing happens.
+ If old_file_name is None (default), the file used in
+ read_from_file is used. If new_file_name is None (default), the
+ file old_file_name appended with .bak is used. If that file exists
+ already, .1 is appended. If that file exists, .2 is appended, etc.
+ """
+ if old_file_name is None:
+ old_file_name = self.db_filename
+ if new_file_name is None:
+ new_file_name = old_file_name + ".bak"
+ if os.path.exists(new_file_name):
+ i = 1
+ while os.path.exists(new_file_name + "." + str(i)):
+ i += 1
+ new_file_name = new_file_name + "." + str(i)
+ if os.path.exists(old_file_name):
+ logger.info(CFGMGR_RENAMED_CONFIG_FILE, old_file_name, new_file_name)
+ os.rename(old_file_name, new_file_name)
+
def __eq__(self, other):
"""Returns True if the data contained is equal. data_path and
db_filename may be different."""
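
rename_config_file() chooses a backup name that never clobbers an existing file: first .bak, then .bak.1, .bak.2, and so on. The naming rule on its own (this helper is a hypothetical extraction, not part of the patch):

    import os

    def backup_name(path):
        # Append .bak; if that is taken, append an increasing numeric suffix.
        candidate = path + ".bak"
        if os.path.exists(candidate):
            i = 1
            while os.path.exists(candidate + "." + str(i)):
                i += 1
            candidate = candidate + "." + str(i)
        return candidate

This is exactly the sequence the new test_rename_config_file test below checks: b10-config-rename-test.bak, then .bak.1, then .bak.2.
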
@@ -163,14 +184,16 @@ class ConfigManager:
channel session. If not, a new session will be created.
The ability to specify a custom session is for testing purposes
and should not be needed for normal usage."""
- def __init__(self, data_path, database_filename, session=None):
+ def __init__(self, data_path, database_filename, session=None,
+ clear_config=False):
"""Initialize the configuration manager. The data_path string
is the path to the directory where the configuration is
stored (in <data_path>/<database_filename> or in
- <database_filename>, if it is absolute). The dabase_filename
+ <database_filename>, if it is absolute). The database_filename
is the config file to load. Session is an optional
cc-channel session. If this is not given, a new one is
- created."""
+ created. If clear_config is True, the configuration file is
+ renamed and a new one is created."""
self.data_path = data_path
self.database_filename = database_filename
self.module_specs = {}
@@ -179,6 +202,8 @@ class ConfigManager:
# of some other process
self.virtual_modules = {}
self.config = ConfigManagerData(data_path, database_filename)
+ if clear_config:
+ self.config.rename_config_file()
if session:
self.cc = session
else:
diff --git a/src/lib/python/isc/config/cfgmgr_messages.mes b/src/lib/python/isc/config/cfgmgr_messages.mes
index 61a63ed..ad78be0 100644
--- a/src/lib/python/isc/config/cfgmgr_messages.mes
+++ b/src/lib/python/isc/config/cfgmgr_messages.mes
@@ -51,7 +51,11 @@ error is given. The most likely cause is that the system does not have
write access to the configuration database file. The updated
configuration is not stored.
+% CFGMGR_RENAMED_CONFIG_FILE renamed configuration file %1 to %2, will create new %1
+BIND 10 has been started with the option to clear the configuration file.
+The existing file is backed up to the given file name, so that its data is
+not immediately lost if this was done by accident.
+
% CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
There was a keyboard interrupt signal to stop the cfgmgr daemon. The
daemon will now shut down.
-
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index e7e810b..346585b 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -42,7 +42,7 @@ def spec_part_is_map(spec_part):
def spec_part_is_named_set(spec_part):
"""Returns True if the given spec_part is a dict that contains a
named_set specification, and False otherwise."""
- return (type(spec_part) == dict and 'named_map_item_spec' in spec_part)
+ return (type(spec_part) == dict and 'named_set_item_spec' in spec_part)
def check_type(spec_part, value):
"""Does nothing if the value is of the correct type given the
@@ -672,6 +672,16 @@ class MultiConfigData:
self._append_value_item(result, spec_part, identifier, all, True)
return result
+ def unset(self, identifier):
+ """
+ Reset the value to default.
+ """
+ spec_part = self.find_spec_part(identifier)
+ if spec_part is not None:
+ isc.cc.data.unset(self._local_changes, identifier)
+ else:
+ raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
def set_value(self, identifier, value):
"""Set the local value at the given identifier to value. If
there is a specification for the given identifier, the type
@@ -720,6 +730,15 @@ class MultiConfigData:
cur_id_part + id,
cur_value)
cur_id_part = cur_id_part + id_part + "/"
+
+ # We also need to copy to local if we are changing a named set,
+ # so that the other items in the set do not disappear
+ if spec_part_is_named_set(self.find_spec_part(cur_id_part)):
+ ns_value, ns_status = self.get_value(cur_id_part)
+ if ns_status != MultiConfigData.LOCAL:
+ isc.cc.data.set(self._local_changes,
+ cur_id_part,
+ ns_value)
isc.cc.data.set(self._local_changes, identifier, value)
def _get_list_items(self, item_name):
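
Both additions above are about the layering of local changes over specification defaults: unset() drops the local override so the default value becomes visible again, and the named-set branch in set_value() copies the whole default map into the local layer first, so changing one member cannot silently discard its siblings. Reduced to plain dictionaries (purely illustrative; this is not the isc.cc.data API):

    defaults = {"named_set": {"a": 1, "b": 2}}
    local = {}

    def effective(key):
        return local.get(key, defaults[key])

    # Copy the whole set to the local layer before touching one member.
    local["named_set"] = dict(effective("named_set"))
    local["named_set"]["b"] = 3
    assert effective("named_set") == {"a": 1, "b": 3}

    # "unset" just removes the local override; the default resurfaces.
    del local["named_set"]
    assert effective("named_set") == {"a": 1, "b": 2}
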
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 7fe8212..891a7d7 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -74,6 +74,60 @@ class TestConfigManagerData(unittest.TestCase):
self.assertEqual(self.config_manager_data, new_config)
os.remove(output_file_name)
+ def check_existence(self, files, should_exist=[], should_not_exist=[]):
+ """Helper function for test_rename_config_file.
+ Arguments:
+ files: array of file names to check.
+ should_exist: array of indices, the files in 'files' with these
+ indices should exist.
+ should_not_exist: array of indices, the files in 'files' with
+ these indices should not exist."""
+ for n in should_exist:
+ self.assertTrue(os.path.exists(files[n]))
+ for n in should_not_exist:
+ self.assertFalse(os.path.exists(files[n]))
+
+ def test_rename_config_file(self):
+ # test file names, put in array for easy cleanup
+ filenames = [ "b10-config-rename-test",
+ "b10-config-rename-test.bak",
+ "b10-config-rename-test.bak.1",
+ "b10-config-rename-test.bak.2" ]
+
+ for filename in filenames:
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ # The original does not exist, so the new one should not be created
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [], [0, 1, 2, 3])
+
+ # now create a file to rename, and call rename again
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1], [0, 2, 3])
+
+ # If backup already exists, give it a new name automatically
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1, 2], [0, 3])
+
+ # If backup already exists, give it a new name automatically with
+ # increasing postfix
+ self.config_manager_data.write_to_file(filenames[0])
+ self.config_manager_data.rename_config_file(filenames[0])
+ self.check_existence(filenames, [1, 2, 3], [0])
+
+ # Test with explicit renamed file argument
+ self.config_manager_data.rename_config_file(filenames[1],
+ filenames[0])
+ self.check_existence(filenames, [0, 2, 3], [1])
+
+ # clean up again to be nice
+ for filename in filenames:
+ if os.path.exists(filename):
+ os.remove(filename)
+
def test_equality(self):
# tests the __eq__ function. Equality is only defined
# by equality of the .data element. If data_path or db_filename
@@ -570,5 +624,6 @@ if __name__ == '__main__':
if not 'CONFIG_TESTDATA_PATH' in os.environ or not 'CONFIG_WR_TESTDATA_PATH' in os.environ:
print("You need to set the environment variable CONFIG_TESTDATA_PATH and CONFIG_WR_TESTDATA_PATH to point to the directory containing the test data files")
exit(1)
+ isc.log.init("unittests")
+ isc.log.resetUnitTestRootLogger()
unittest.main()
-
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index 446d898..864fe70 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -618,6 +618,16 @@ class TestMultiConfigData(unittest.TestCase):
maps = self.mcd.get_value_maps("/Spec22/value9/")
self.assertEqual(expected, maps)
+ # A trailing slash should not produce different output, even for
+ # identifiers with indices
+ expected2 = [{'default': True,
+ 'type': 'integer',
+ 'name': 'Spec22/value5[1]',
+ 'value': 'b',
+ 'modified': False}]
+ maps = self.mcd.get_value_maps("/Spec22/value5[1]/")
+ self.assertEqual(expected2, maps)
+
def test_get_value_maps_named_set(self):
module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
self.mcd.set_specification(module_spec)
@@ -657,7 +667,38 @@ class TestMultiConfigData(unittest.TestCase):
self.assertEqual(MultiConfigData.LOCAL, status)
self.assertRaises(isc.cc.data.DataTypeError, self.mcd.set_value, "Spec2/item5[a]", "asdf")
-
+
+
+ def test_unset(self):
+ """
+ Test the unset command works.
+ """
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+ self.mcd.set_specification(module_spec)
+ self.mcd.set_specification(module_spec)
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This is the default first
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Unsetting a default item does nothing.
+ self.mcd.unset("Spec2/item1")
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This should be the default
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Set it to something else
+ self.mcd.set_value("Spec2/item1", 42)
+ value, status = self.mcd.get_value("Spec2/item1")
+ self.assertEqual(42, value)
+ self.assertEqual(MultiConfigData.LOCAL, status)
+ # Try to unset it
+ self.mcd.unset("Spec2/item1")
+ value, status = self.mcd.get_value("Spec2/item1")
+ # This should be the default
+ self.assertEqual(1, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+ # Unset a nonexisting item. Should raise.
+ self.assertRaises(isc.cc.data.DataNotFoundError, self.mcd.unset, "Spec2/doesnotexist")
def test_get_config_item_list(self):
config_items = self.mcd.get_config_item_list()
@@ -679,6 +720,12 @@ class TestMultiConfigData(unittest.TestCase):
config_items = self.mcd.get_config_item_list("Spec2", True)
self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
+ def test_is_named_set(self):
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+ self.mcd.set_specification(module_spec)
+ spec_part = self.mcd.find_spec_part("Spec32/named_set_item")
+ self.assertTrue(spec_part_is_named_set(spec_part))
+
def test_get_config_item_list_named_set(self):
config_items = self.mcd.get_config_item_list()
self.assertEqual([], config_items)
@@ -697,6 +744,20 @@ class TestMultiConfigData(unittest.TestCase):
'Spec32/named_set_item/bbbb',
], config_items)
+ def test_set_named_set_nonlocal(self):
+ # Test whether a default named set is copied to local if a subitem
+ # is changed, and that other items in the set do not get lost
+ module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + 'spec32.spec')
+ self.mcd.set_specification(module_spec)
+ value, status = self.mcd.get_value('Spec32/named_set_item')
+ self.assertEqual({'a': 1, 'b': 2}, value)
+ self.assertEqual(MultiConfigData.DEFAULT, status)
+
+ self.mcd.set_value('Spec32/named_set_item/b', 3)
+ value, status = self.mcd.get_value('Spec32/named_set_item')
+ self.assertEqual({'a': 1, 'b': 3}, value)
+ self.assertEqual(MultiConfigData.LOCAL, status)
+
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 8ee06b7..f31d10a 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -233,6 +233,7 @@ initModulePart_ZoneJournalReader(PyObject* mod) {
}
PyObject* po_DataSourceError;
+PyObject* po_OutOfZone;
PyObject* po_NotImplemented;
PyModuleDef iscDataSrc = {
@@ -287,6 +288,8 @@ PyInit_datasrc(void) {
po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
NULL);
PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+ po_OutOfZone = PyErr_NewException("isc.datasrc.OutOfZone", NULL, NULL);
+ PyObjectContainer(po_OutOfZone).installToModule(mod, "OutOfZone");
po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
NULL, NULL);
PyObjectContainer(po_NotImplemented).installToModule(mod,
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index c063c44..7caa144 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -99,10 +99,9 @@ Their semantics is as follows (they are or bit-field):\n\
of the non existence of any matching wildcard or non existence of an\n\
exact match when a wildcard match is found.\n\
\n\
-In general, name is expected to be included in the zone, that is, it\n\
-should be equal to or a subdomain of the zone origin. Otherwise this\n\
-method will return NXDOMAIN with an empty RRset. But such a case\n\
-should rather be considered a caller's bug.\n\
+Name is expected to be included in the zone, that is, it\n\
+should be equal to or a subdomain of the zone origin. Otherwise an\n\
+OutOfZone exception is raised.\n\
\n\
Note: For this reason it's probably better to throw an exception than\n\
returning NXDOMAIN. This point should be revisited in a near future\n\
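
On the Python side this means that looking up a name outside the zone now raises isc.datasrc.OutOfZone instead of returning NXDOMAIN, as the updated datasrc_test.py below demonstrates. Callers would typically guard the lookup along these lines (a sketch; setting up the client and finder is elided):

    try:
        result, rrset, flags = finder.find(name, rrtype)
    except isc.datasrc.OutOfZone:
        # The name is not at or below the zone origin; treat this as a
        # caller-side error rather than as a negative (NXDOMAIN) answer.
        raise
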
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 33a503f..ed05fdb 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -97,6 +97,9 @@ PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
} else {
return (Py_BuildValue("IOI", r, Py_None, result_flags));
}
+ } catch (const OutOfZone& ooz) {
+ PyErr_SetString(getDataSourceException("OutOfZone"), ooz.what());
+ return (NULL);
} catch (const DataSourceError& dse) {
PyErr_SetString(getDataSourceException("Error"), dse.what());
return (NULL);
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index daa12fc..f9b47c0 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -23,6 +23,10 @@ RR_NAME_INDEX = 2
RR_TTL_INDEX = 4
RR_RDATA_INDEX = 7
+# Current major and minor versions of schema
+SCHEMA_MAJOR_VERSION = 2
+SCHEMA_MINOR_VERSION = 0
+
class Sqlite3DSError(Exception):
""" Define exceptions."""
pass
@@ -47,40 +51,46 @@ def create(cur):
cur.execute("SELECT version FROM schema_version")
row = cur.fetchone()
except sqlite3.OperationalError:
- cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
- cur.execute("INSERT INTO schema_version VALUES (1)")
+ cur.execute("""CREATE TABLE schema_version (version INTEGER NOT NULL,
+ minor INTEGER NOT NULL DEFAULT 0)""")
+ cur.execute("INSERT INTO schema_version VALUES (" +
+ str(SCHEMA_MAJOR_VERSION) + ", " +
+ str(SCHEMA_MINOR_VERSION) + ")")
cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
- name STRING NOT NULL COLLATE NOCASE,
- rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+ name TEXT NOT NULL COLLATE NOCASE,
+ rdclass TEXT NOT NULL COLLATE NOCASE DEFAULT 'IN',
dnssec BOOLEAN NOT NULL DEFAULT 0)""")
cur.execute("CREATE INDEX zones_byname ON zones (name)")
cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rname STRING NOT NULL COLLATE NOCASE,
+ name TEXT NOT NULL COLLATE NOCASE,
+ rname TEXT NOT NULL COLLATE NOCASE,
ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- sigtype STRING COLLATE NOCASE,
- rdata STRING NOT NULL)""")
+ rdtype TEXT NOT NULL COLLATE NOCASE,
+ sigtype TEXT COLLATE NOCASE,
+ rdata TEXT NOT NULL)""")
cur.execute("CREATE INDEX records_byname ON records (name)")
cur.execute("CREATE INDEX records_byrname ON records (rname)")
+ cur.execute("""CREATE INDEX records_bytype_and_rname ON records
+ (rdtype, rname)""")
cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
- hash STRING NOT NULL COLLATE NOCASE,
- owner STRING NOT NULL COLLATE NOCASE,
+ hash TEXT NOT NULL COLLATE NOCASE,
+ owner TEXT NOT NULL COLLATE NOCASE,
ttl INTEGER NOT NULL,
- rdtype STRING NOT NULL COLLATE NOCASE,
- rdata STRING NOT NULL)""")
+ rdtype TEXT NOT NULL COLLATE NOCASE,
+ rdata TEXT NOT NULL)""")
cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
zone_id INTEGER NOT NULL,
version INTEGER NOT NULL,
operation INTEGER NOT NULL,
- name STRING NOT NULL COLLATE NOCASE,
- rrtype STRING NOT NULL COLLATE NOCASE,
+ name TEXT NOT NULL COLLATE NOCASE,
+ rrtype TEXT NOT NULL COLLATE NOCASE,
ttl INTEGER NOT NULL,
- rdata STRING NOT NULL)""")
- row = [1]
+ rdata TEXT NOT NULL)""")
+ cur.execute("SELECT version FROM schema_version")
+ row = cur.fetchone()
cur.execute("COMMIT TRANSACTION")
return row
@@ -115,8 +125,9 @@ def open(dbfile, connect_timeout=5.0):
row = create(cur)
conn.isolation_level = iso_lvl
- if row == None or row[0] != 1:
- raise Sqlite3DSError("Bad database schema version")
+ if row == None or row[0] != SCHEMA_MAJOR_VERSION:
+ bad_version = "(unknown)" if row is None else str(row[0])
+ raise Sqlite3DSError("Bad database schema version: " + bad_version)
return conn, cur
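
The schema version is now tracked as a (major, minor) pair, and open() only rejects a mismatched major version; a database carrying a newer minor version is still accepted (that is what the new_minor_schema.sqlite3 test data below exercises). That compatibility rule, condensed from the diff (the function name is illustrative; Sqlite3DSError is this module's own exception class):

    SCHEMA_MAJOR_VERSION = 2
    SCHEMA_MINOR_VERSION = 0

    def check_schema_version(row):
        # 'row' holds the version read from schema_version, or None if the
        # table could not be read at all.
        if row is None or row[0] != SCHEMA_MAJOR_VERSION:
            bad = "(unknown)" if row is None else str(row[0])
            raise Sqlite3DSError("Bad database schema version: " + bad)
        # The minor version is deliberately not checked, so a database
        # created by a newer minor schema revision is still accepted.
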
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index ab89b93..c996f2a 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,12 +1,14 @@
PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
# old tests, TODO remove or change to use new API?
-#PYTESTS = master_test.py sqlite3_ds_test.py
-PYTESTS = datasrc_test.py
+#PYTESTS = master_test.py
+PYTESTS = datasrc_test.py sqlite3_ds_test.py
EXTRA_DIST = $(PYTESTS)
EXTRA_DIST += testdata/brokendb.sqlite3
EXTRA_DIST += testdata/example.com.sqlite3
-EXTRA_DIST += testdata/test.sqlite3.nodiffs
+EXTRA_DIST += testdata/newschema.sqlite3
+EXTRA_DIST += testdata/oldschema.sqlite3
+EXTRA_DIST += testdata/new_minor_schema.sqlite3
CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index a6f8f16..c7bf6b4 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -262,16 +262,16 @@ class DataSrcClient(unittest.TestCase):
rrets = dsc.get_iterator(isc.dns.Name("example.com"))
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
- self.assertEqual(55, len(list(rrets)))
+ # There are 40 non-RRSIG RRsets and 32 distinct RRSIGs.
+ self.assertEqual(72, len(list(rrets)))
# same test, but now with explicit False argument for separate_rrs
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
# there are more than 80 RRs in this zone... let's just count them
# (already did a full check of the smaller zone above)
- self.assertEqual(55, len(list(rrets)))
+ self.assertEqual(72, len(list(rrets)))
- # Count should be 71 if we request individual rrsets for differing ttls
dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
# there are more than 80 RRs in this zone... let's just count them
@@ -379,11 +379,10 @@ class DataSrcClient(unittest.TestCase):
self.assertEqual(finder.NXDOMAIN, result)
self.assertEqual(None, rrset)
- result, rrset, _ = finder.find(isc.dns.Name("www.some.other.domain"),
- isc.dns.RRType.A(),
- finder.FIND_DEFAULT)
- self.assertEqual(finder.NXDOMAIN, result)
- self.assertEqual(None, rrset)
+
+ self.assertRaises(isc.datasrc.OutOfZone, finder.find,
+ isc.dns.Name("www.some.other.domain"),
+ isc.dns.RRType.A())
result, rrset, _ = finder.find(isc.dns.Name("www.example.com"),
isc.dns.RRType.TXT(),
@@ -881,15 +880,6 @@ class JournalRead(unittest.TestCase):
# ZoneJournalReader can only be constructed via a factory
self.assertRaises(TypeError, ZoneJournalReader)
- def test_journal_reader_old_schema(self):
- # The database doesn't have a "diffs" table.
- dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
- client = isc.datasrc.DataSourceClient("sqlite3",
- "{ \"database_file\": \"" + \
- dbfile + "\" }")
- self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
- self.zname, 0, 1)
-
if __name__ == "__main__":
isc.log.init("bind10")
isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 10c61cf..5604c32 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -22,122 +22,18 @@ import sqlite3
TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
-READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
-WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
-NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
-
-def example_reader():
- my_zone = [
- ("example.com.", "3600", "IN", "SOA", "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200"),
- ("example.com.", "3600", "IN", "NS", "ns.example.com."),
- ("ns.example.com.", "3600", "IN", "A", "192.0.2.1")
- ]
- for rr in my_zone:
- yield rr
-
-def example_reader_nested():
- # this iterator is used in the 'locked' test; it will cause
- # the load() method to try and write to the same database
- sqlite3_ds.load(WRITE_ZONE_DB_FILE,
- ".",
- example_reader)
- return example_reader()
-
-class TestSqlite3_ds(unittest.TestCase):
- def test_zone_exist(self):
- # The following file must be non existent and must be non
- # "creatable"; the sqlite3 library will try to create a new
- # DB file if it doesn't exist, so to test a failure case the
- # create operation should also fail. The "nodir", a non
- # existent directory, is inserted for this purpose.
- nodir = "/nodir/notexist"
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.zone_exist, "example.com", nodir)
- # Open a broken database file
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.zone_exist, "example.com",
- BROKEN_DB_FILE)
- self.assertTrue(sqlite3_ds.zone_exist("example.com.",
- READ_ZONE_DB_FILE))
- self.assertFalse(sqlite3_ds.zone_exist("example.org.",
- READ_ZONE_DB_FILE))
-
- def test_load_db(self):
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- def test_locked_db(self):
- # load it first to make sure it exists
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- # and manually create a writing session as well
- con = sqlite3.connect(WRITE_ZONE_DB_FILE);
- cur = con.cursor()
- cur.execute("delete from records")
-
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
- example_reader)
-
- con.rollback()
-
- # and make sure lock does not stay
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
-
- # force locked db by nested loads
- self.assertRaises(sqlite3_ds.Sqlite3DSError,
- sqlite3_ds.load, WRITE_ZONE_DB_FILE, ".",
- example_reader_nested)
-
- # and make sure lock does not stay
- sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
+DBFILE_NEWSCHEMA = TESTDATA_PATH + "/newschema.sqlite3"
+DBFILE_OLDSCHEMA = TESTDATA_PATH + "/oldschema.sqlite3"
+DBFILE_NEW_MINOR_SCHEMA = TESTDATA_PATH + "/new_minor_schema.sqlite3"
class NewDBFile(unittest.TestCase):
- def tearDown(self):
- # remove the created database after every test
- if (os.path.exists(NEW_DB_FILE)):
- os.remove(NEW_DB_FILE)
-
- def setUp(self):
- # remove the created database before every test too, just
- # in case a test got aborted half-way, and cleanup didn't occur
- if (os.path.exists(NEW_DB_FILE)):
- os.remove(NEW_DB_FILE)
-
- def test_new_db(self):
- self.assertFalse(os.path.exists(NEW_DB_FILE))
- sqlite3_ds.open(NEW_DB_FILE)
- self.assertTrue(os.path.exists(NEW_DB_FILE))
-
- def test_new_db_locked(self):
- self.assertFalse(os.path.exists(NEW_DB_FILE))
- con = sqlite3.connect(NEW_DB_FILE);
- con.isolation_level = None
- cur = con.cursor()
- cur.execute("BEGIN IMMEDIATE TRANSACTION")
-
- # load should now fail, since the database is locked,
- # and the open() call needs an exclusive lock
- self.assertRaises(sqlite3.OperationalError,
- sqlite3_ds.open, NEW_DB_FILE, 0.1)
-
- con.rollback()
- cur.close()
- con.close()
- self.assertTrue(os.path.exists(NEW_DB_FILE))
-
- # now that we closed our connection, load should work again
- sqlite3_ds.open(NEW_DB_FILE)
-
- # the database should now have been created, and a new load should
- # not require an exclusive lock anymore, so we lock it again
- con = sqlite3.connect(NEW_DB_FILE);
- cur = con.cursor()
- cur.execute("BEGIN IMMEDIATE TRANSACTION")
- sqlite3_ds.open(NEW_DB_FILE, 0.1)
- con.rollback()
- cur.close()
- con.close()
+ def test_different_version(self):
+ self.assertTrue(os.path.exists(DBFILE_NEWSCHEMA))
+ self.assertRaises(sqlite3_ds.Sqlite3DSError, sqlite3_ds.open,
+ DBFILE_NEWSCHEMA)
+ self.assertRaises(sqlite3_ds.Sqlite3DSError, sqlite3_ds.open,
+ DBFILE_OLDSCHEMA)
+ self.assertNotEqual(None, sqlite3_ds.open(DBFILE_NEW_MINOR_SCHEMA)[0])
if __name__ == '__main__':
unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index 521cf31..9c71cb5 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3
new file mode 100644
index 0000000..1542c20
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/new_minor_schema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3
new file mode 100644
index 0000000..460cfa8
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/newschema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3
new file mode 100644
index 0000000..b44c5eb
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/oldschema.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
deleted file mode 100644
index cc8cfc3..0000000
Binary files a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs and /dev/null differ
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
index 4b7e1d1..6b4be94 100644
--- a/src/lib/python/isc/log_messages/Makefile.am
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -14,6 +14,7 @@ EXTRA_DIST += config_messages.py
EXTRA_DIST += notify_out_messages.py
EXTRA_DIST += libxfrin_messages.py
EXTRA_DIST += server_common_messages.py
+EXTRA_DIST += dbutil_messages.py
CLEANFILES = __init__.pyc
CLEANFILES += bind10_messages.pyc
@@ -29,6 +30,7 @@ CLEANFILES += config_messages.pyc
CLEANFILES += notify_out_messages.pyc
CLEANFILES += libxfrin_messages.pyc
CLEANFILES += server_common_messages.pyc
+CLEANFILES += dbutil_messages.pyc
CLEANDIRS = __pycache__
diff --git a/src/lib/python/isc/log_messages/dbutil_messages.py b/src/lib/python/isc/log_messages/dbutil_messages.py
new file mode 100644
index 0000000..c06dfef
--- /dev/null
+++ b/src/lib/python/isc/log_messages/dbutil_messages.py
@@ -0,0 +1 @@
+from work.dbutil_messages import *
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
index 9bc5e0f..ad5ee0c 100644
--- a/src/lib/python/isc/log_messages/work/Makefile.am
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -5,7 +5,7 @@ python_PYTHON = __init__.py
pythondir = $(pyexecdir)/isc/log_messages/
-CLEANFILES = __init__.pyc
+CLEANFILES = __init__.pyc __init__.pyo
CLEANDIRS = __pycache__
clean-local:
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 6f3bec9..bfa7167 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -278,12 +278,12 @@ class NotifyOut:
# data sources.
datasrc_config = '{ "database_file": "' + self._db_file + '"}'
try:
- result, finder = DataSourceClient('sqlite3',
- datasrc_config).find_zone(
- zone_name)
+ ds_client = DataSourceClient('sqlite3', datasrc_config)
except isc.datasrc.Error as ex:
logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
return []
+
+ result, finder = ds_client.find_zone(zone_name)
if result is not DataSourceClient.SUCCESS:
logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
format_zone_str(zone_name, zone_class))
@@ -307,13 +307,17 @@ class NotifyOut:
ns_name = Name(ns_rdata.to_text())
if soa_mname == ns_name:
continue
- result, rrset, _ = finder.find(ns_name, RRType.A())
- if result is finder.SUCCESS and rrset is not None:
- addrs.extend([a.to_text() for a in rrset.get_rdata()])
-
- result, rrset, _ = finder.find(ns_name, RRType.AAAA())
- if result is finder.SUCCESS and rrset is not None:
- addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
+ ns_result, ns_finder = ds_client.find_zone(ns_name)
+ if ns_result is DataSourceClient.SUCCESS or \
+ ns_result is DataSourceClient.PARTIALMATCH:
+ result, rrset, _ = ns_finder.find(ns_name, RRType.A())
+ if result is ns_finder.SUCCESS and rrset is not None:
+ addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+ result, rrset, _ = ns_finder.find(ns_name, RRType.AAAA())
+ if result is ns_finder.SUCCESS and rrset is not None:
+ addrs.extend([aaaa.to_text()
+ for aaaa in rrset.get_rdata()])
return addrs
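
The reworked notify-target lookup above no longer assumes the NS names live in the notifying zone: for each NS name it first locates the zone that actually contains the name via find_zone(), and only then looks up A and AAAA records with that zone's finder. The pattern, compressed into one helper (a sketch mirroring the calls used in the patch; error handling and class selection are trimmed):

    def addresses_for(ds_client, ns_name):
        addrs = []
        result, finder = ds_client.find_zone(ns_name)
        if result not in (DataSourceClient.SUCCESS,
                          DataSourceClient.PARTIALMATCH):
            return addrs
        for rrtype in (RRType.A(), RRType.AAAA()):
            res, rrset, _ = finder.find(ns_name, rrtype)
            if res == finder.SUCCESS and rrset is not None:
                addrs.extend(rd.to_text() for rd in rrset.get_rdata())
        return addrs
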
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
index 61e766c..10d64c1 100644
Binary files a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
index e3cadb0..d659181 100644
Binary files a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/resolve/tests/recursive_query_unittest.cc b/src/lib/resolve/tests/recursive_query_unittest.cc
index 83ea052..a8b8057 100644
--- a/src/lib/resolve/tests/recursive_query_unittest.cc
+++ b/src/lib/resolve/tests/recursive_query_unittest.cc
@@ -18,10 +18,12 @@
#include <sys/socket.h>
#include <sys/time.h>
-#include <string.h>
+#include <cstring>
+#include <boost/noncopyable.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <gtest/gtest.h>
@@ -62,6 +64,7 @@ using namespace isc::asiodns;
using namespace isc::asiolink;
using namespace isc::dns;
using namespace isc::util;
+using boost::scoped_ptr;
namespace isc {
namespace asiodns {
@@ -85,18 +88,14 @@ const char* const TEST_IPV4_ADDR = "127.0.0.1";
// for the tests below.
const uint8_t test_data[] = {0, 4, 1, 2, 3, 4};
-// This function returns an addrinfo structure for use by tests, using
-// different addresses and ports depending on whether we're testing
-// IPv4 or v6, TCP or UDP, and client or server operation.
+// This function returns an addrinfo structure for use by tests.
struct addrinfo*
-resolveAddress(const int family, const int protocol, const bool client) {
- const char* const addr = (family == AF_INET6) ?
- TEST_IPV6_ADDR : TEST_IPV4_ADDR;
- const char* const port = client ? TEST_CLIENT_PORT : TEST_SERVER_PORT;
-
+resolveAddress(const int protocol, const char* const addr,
+ const char* const port)
+{
struct addrinfo hints;
memset(&hints, 0, sizeof(hints));
- hints.ai_family = family;
+ hints.ai_family = AF_UNSPEC; // let the address decide it.
hints.ai_socktype = (protocol == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM;
hints.ai_protocol = protocol;
hints.ai_flags = AI_NUMERICSERV;
@@ -110,6 +109,51 @@ resolveAddress(const int family, const int protocol, const bool client) {
return (res);
}
+// convenience shortcut of the other version using different addresses and
+// ports depending on whether we're testing IPv4 or v6, TCP or UDP, and
+// client or server operation.
+struct addrinfo*
+resolveAddress(const int family, const int protocol, const bool client) {
+ return (resolveAddress(protocol,
+ (family == AF_INET6) ? TEST_IPV6_ADDR :
+ TEST_IPV4_ADDR,
+ client ? TEST_CLIENT_PORT : TEST_SERVER_PORT));
+}
+
+// A helper holder of addrinfo so we can safely release the resource
+// when leaving the defined scope, either normally or due to an exception.
+struct ScopedAddrInfo {
+ ScopedAddrInfo(struct addrinfo* res) : res_(res) {}
+ ~ScopedAddrInfo() { freeaddrinfo(res_); }
+ struct addrinfo* res_;
+};
+
+// Similar to ScopedAddrInfo but for socket FD. It also supports the "release"
+// operation so it can release the ownership of the FD.
+// This is made non copyable to avoid making an accidental copy, which could
+// result in duplicate close.
+struct ScopedSocket : private boost::noncopyable {
+ ScopedSocket() : s_(-1) {}
+ ScopedSocket(int s) : s_(s) {}
+ ~ScopedSocket() {
+ if (s_ >= 0) {
+ close(s_);
+ }
+ }
+ void reset(int new_s) {
+ if (s_ >= 0) {
+ close(s_);
+ }
+ s_ = new_s;
+ }
+ int release() {
+ int s = s_;
+ s_ = -1;
+ return (s);
+ }
+ int s_;
+};
+
// This fixture is a framework for various types of network operations
// using the ASIO interfaces. Each test case creates an IOService object,
// opens a local "client" socket for testing, sends data via the local socket
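
ScopedAddrInfo and ScopedSocket above are small RAII holders: the resource is freed when the holder goes out of scope, and ScopedSocket.release() hands ownership elsewhere so the destructor will not close the FD twice. For readers more at home in the Python parts of this patch, the same ownership idea expressed as a context manager (an analogy only, not code from the patch):

    class ScopedSocket:
        '''Close the wrapped socket on scope exit unless release()d.'''
        def __init__(self, sock):
            self._sock = sock

        def release(self):
            # Hand the socket to the caller; we no longer own it.
            sock, self._sock = self._sock, None
            return sock

        def __enter__(self):
            return self

        def __exit__(self, *exc):
            if self._sock is not None:
                self._sock.close()

Used as "with ScopedSocket(socket.socket()) as holder: ...", the socket is closed automatically unless holder.release() transferred ownership first.
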
@@ -129,27 +173,20 @@ protected:
// It would delete itself, but after the io_service_, which could
// segfault in case there were unhandled requests
resolver_.reset();
- if (res_ != NULL) {
- freeaddrinfo(res_);
- }
- if (sock_ != -1) {
- close(sock_);
- }
- delete dns_service_;
- delete callback_;
- delete io_service_;
}
// Send a test UDP packet to a mock server
void sendUDP(const int family) {
- res_ = resolveAddress(family, IPPROTO_UDP, false);
+ ScopedAddrInfo sai(resolveAddress(family, IPPROTO_UDP, false));
+ struct addrinfo* res = sai.res_;
- sock_ = socket(res_->ai_family, res_->ai_socktype, res_->ai_protocol);
- if (sock_ < 0) {
+ sock_.reset(socket(res->ai_family, res->ai_socktype,
+ res->ai_protocol));
+ if (sock_.s_ < 0) {
isc_throw(IOError, "failed to open test socket");
}
- const int cc = sendto(sock_, test_data, sizeof(test_data), 0,
- res_->ai_addr, res_->ai_addrlen);
+ const int cc = sendto(sock_.s_, test_data, sizeof(test_data), 0,
+ res->ai_addr, res->ai_addrlen);
if (cc != sizeof(test_data)) {
isc_throw(IOError, "unexpected sendto result: " << cc);
}
@@ -158,16 +195,18 @@ protected:
// Send a test TCP packet to a mock server
void sendTCP(const int family) {
- res_ = resolveAddress(family, IPPROTO_TCP, false);
+ ScopedAddrInfo sai(resolveAddress(family, IPPROTO_TCP, false));
+ struct addrinfo* res = sai.res_;
- sock_ = socket(res_->ai_family, res_->ai_socktype, res_->ai_protocol);
- if (sock_ < 0) {
+ sock_.reset(socket(res->ai_family, res->ai_socktype,
+ res->ai_protocol));
+ if (sock_.s_ < 0) {
isc_throw(IOError, "failed to open test socket");
}
- if (connect(sock_, res_->ai_addr, res_->ai_addrlen) < 0) {
+ if (connect(sock_.s_, res->ai_addr, res->ai_addrlen) < 0) {
isc_throw(IOError, "failed to connect to the test server");
}
- const int cc = send(sock_, test_data, sizeof(test_data), 0);
+ const int cc = send(sock_.s_, test_data, sizeof(test_data), 0);
if (cc != sizeof(test_data)) {
isc_throw(IOError, "unexpected send result: " << cc);
}
@@ -178,14 +217,16 @@ protected:
// recursive lookup. The caller must place a RecursiveQuery
// on the IO Service queue before running this routine.
void recvUDP(const int family, void* buffer, size_t& size) {
- res_ = resolveAddress(family, IPPROTO_UDP, true);
+ ScopedAddrInfo sai(resolveAddress(family, IPPROTO_UDP, true));
+ struct addrinfo* res = sai.res_;
- sock_ = socket(res_->ai_family, res_->ai_socktype, res_->ai_protocol);
- if (sock_ < 0) {
+ sock_.reset(socket(res->ai_family, res->ai_socktype,
+ res->ai_protocol));
+ if (sock_.s_ < 0) {
isc_throw(IOError, "failed to open test socket");
}
- if (bind(sock_, res_->ai_addr, res_->ai_addrlen) < 0) {
+ if (bind(sock_.s_, res->ai_addr, res->ai_addrlen) < 0) {
isc_throw(IOError, "bind failed: " << strerror(errno));
}
@@ -205,7 +246,7 @@ protected:
// we add an ad hoc timeout.
const struct timeval timeo = { 10, 0 };
int recv_options = 0;
- if (setsockopt(sock_, SOL_SOCKET, SO_RCVTIMEO, &timeo,
+ if (setsockopt(sock_.s_, SOL_SOCKET, SO_RCVTIMEO, &timeo,
sizeof(timeo))) {
if (errno == ENOPROTOOPT) {
// Workaround for Solaris: it doesn't accept SO_RCVTIMEO
@@ -218,7 +259,7 @@ protected:
isc_throw(IOError, "set RCVTIMEO failed: " << strerror(errno));
}
}
- const int ret = recv(sock_, buffer, size, recv_options);
+ const int ret = recv(sock_.s_, buffer, size, recv_options);
if (ret < 0) {
isc_throw(IOError, "recvfrom failed: " << strerror(errno));
}
@@ -227,38 +268,68 @@ protected:
size = ret;
}
+ void
+ addServer(const string& address, const char* const port, int protocol) {
+ ScopedAddrInfo sai(resolveAddress(protocol, address.c_str(), port));
+ struct addrinfo* res = sai.res_;
+ const int family = res->ai_family;
+
+ ScopedSocket sock(socket(res->ai_family, res->ai_socktype,
+ res->ai_protocol));
+ const int s = sock.s_;
+ if (s < 0) {
+ isc_throw(isc::Unexpected, "failed to open a test socket");
+ }
+ const int on = 1;
+ if (family == AF_INET6) {
+ if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) ==
+ -1) {
+ isc_throw(isc::Unexpected,
+ "failed to set socket option(IPV6_V6ONLY)");
+ }
+ }
+ if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) {
+ isc_throw(isc::Unexpected,
+ "failed to set socket option(SO_REUSEADDR)");
+ }
+ if (bind(s, res->ai_addr, res->ai_addrlen) != 0) {
+ isc_throw(isc::Unexpected, "failed to bind a test socket");
+ }
+ if (protocol == IPPROTO_TCP) {
+ dns_service_->addServerTCPFromFD(sock.release(), family);
+ } else {
+ dns_service_->addServerUDPFromFD(sock.release(), family);
+ }
+ }
// Set up an IO Service queue using the specified address
- void setDNSService(const char& address) {
- delete dns_service_;
- dns_service_ = NULL;
- delete io_service_;
- io_service_ = new IOService();
- callback_ = new ASIOCallBack(this);
- dns_service_ = new DNSService(*io_service_, *TEST_SERVER_PORT, address, callback_, NULL, NULL);
+ void setDNSService(const string& address) {
+ setDNSService();
+ addServer(address, TEST_SERVER_PORT, IPPROTO_TCP);
+ addServer(address, TEST_SERVER_PORT, IPPROTO_UDP);
}
// Set up an IO Service queue using the "any" address, on IPv4 if
// 'use_ipv4' is true and on IPv6 if 'use_ipv6' is true.
void setDNSService(const bool use_ipv4, const bool use_ipv6) {
- delete dns_service_;
- dns_service_ = NULL;
- delete io_service_;
- io_service_ = new IOService();
- callback_ = new ASIOCallBack(this);
- dns_service_ = new DNSService(*io_service_, *TEST_SERVER_PORT, use_ipv4, use_ipv6, callback_,
- NULL, NULL);
+ setDNSService();
+ if (use_ipv6) {
+ addServer("::", TEST_SERVER_PORT, IPPROTO_TCP);
+ addServer("::", TEST_SERVER_PORT, IPPROTO_UDP);
+ }
+ if (use_ipv4) {
+ addServer("0.0.0.0", TEST_SERVER_PORT, IPPROTO_TCP);
+ addServer("0.0.0.0", TEST_SERVER_PORT, IPPROTO_UDP);
+ }
}
// Set up empty DNS Service
// Set up an IO Service queue without any addresses
void setDNSService() {
- delete dns_service_;
- dns_service_ = NULL;
- delete io_service_;
- io_service_ = new IOService();
- callback_ = new ASIOCallBack(this);
- dns_service_ = new DNSService(*io_service_, callback_, NULL, NULL);
+ io_service_.reset(new IOService());
+ callback_.reset(new ASIOCallBack(this));
+ dns_service_.reset(new DNSService(*io_service_, callback_.get(), NULL,
+ NULL));
}
// Run a simple server test, on either IPv4 or IPv6, and over either
@@ -277,7 +348,7 @@ protected:
// There doesn't seem to be an effective test for the validity of
// 'native'.
// One thing we are sure is it must be different from our local socket.
- EXPECT_NE(sock_, callback_native_);
+ EXPECT_NE(sock_.s_, callback_native_);
EXPECT_EQ(protocol, callback_protocol_);
EXPECT_EQ(family == AF_INET6 ? TEST_IPV6_ADDR : TEST_IPV4_ADDR,
callback_address_);
@@ -425,28 +496,26 @@ private:
protected:
// We use a pointer for io_service_, because for some tests we
// need to create a new one within one instance of this class
- IOService* io_service_;
- DNSService* dns_service_;
- isc::nsas::NameserverAddressStore* nsas_;
+ scoped_ptr<IOService> io_service_;
+ scoped_ptr<DNSService> dns_service_;
+ scoped_ptr<isc::nsas::NameserverAddressStore> nsas_;
isc::cache::ResolverCache cache_;
- ASIOCallBack* callback_;
+ scoped_ptr<ASIOCallBack> callback_;
int callback_protocol_;
int callback_native_;
string callback_address_;
vector<uint8_t> callback_data_;
- int sock_;
- struct addrinfo* res_;
+ ScopedSocket sock_;
boost::shared_ptr<isc::util::unittests::TestResolver> resolver_;
};
RecursiveQueryTest::RecursiveQueryTest() :
dns_service_(NULL), callback_(NULL), callback_protocol_(0),
- callback_native_(-1), sock_(-1), res_(NULL),
- resolver_(new isc::util::unittests::TestResolver())
+ callback_native_(-1), resolver_(new isc::util::unittests::TestResolver())
{
- io_service_ = new IOService();
+ io_service_.reset(new IOService());
setDNSService(true, true);
- nsas_ = new isc::nsas::NameserverAddressStore(resolver_);
+ nsas_.reset(new isc::nsas::NameserverAddressStore(resolver_));
}
TEST_F(RecursiveQueryTest, v6UDPSend) {
@@ -477,24 +546,24 @@ TEST_F(RecursiveQueryTest, v6UDPSendSpecific) {
// an error on a subsequent read operation. We could do it, but for
// simplicity we only tests the easier cases for now.
- setDNSService(*TEST_IPV6_ADDR);
+ setDNSService(TEST_IPV6_ADDR);
doTest(AF_INET6, IPPROTO_UDP);
}
TEST_F(RecursiveQueryTest, v6TCPSendSpecific) {
- setDNSService(*TEST_IPV6_ADDR);
+ setDNSService(TEST_IPV6_ADDR);
doTest(AF_INET6, IPPROTO_TCP);
EXPECT_THROW(sendTCP(AF_INET), IOError);
}
TEST_F(RecursiveQueryTest, v4UDPSendSpecific) {
- setDNSService(*TEST_IPV4_ADDR);
+ setDNSService(TEST_IPV4_ADDR);
doTest(AF_INET, IPPROTO_UDP);
}
TEST_F(RecursiveQueryTest, v4TCPSendSpecific) {
- setDNSService(*TEST_IPV4_ADDR);
+ setDNSService(TEST_IPV4_ADDR);
doTest(AF_INET, IPPROTO_TCP);
EXPECT_THROW(sendTCP(AF_INET6), IOError);
@@ -502,7 +571,7 @@ TEST_F(RecursiveQueryTest, v4TCPSendSpecific) {
TEST_F(RecursiveQueryTest, v6AddServer) {
setDNSService();
- dns_service_->addServer(*TEST_SERVER_PORT, TEST_IPV6_ADDR);
+ addServer(TEST_IPV6_ADDR, TEST_SERVER_PORT, IPPROTO_TCP);
doTest(AF_INET6, IPPROTO_TCP);
EXPECT_THROW(sendTCP(AF_INET), IOError);
@@ -510,7 +579,7 @@ TEST_F(RecursiveQueryTest, v6AddServer) {
TEST_F(RecursiveQueryTest, v4AddServer) {
setDNSService();
- dns_service_->addServer(*TEST_SERVER_PORT, TEST_IPV4_ADDR);
+ addServer(TEST_IPV4_ADDR, TEST_SERVER_PORT, IPPROTO_TCP);
doTest(AF_INET, IPPROTO_TCP);
EXPECT_THROW(sendTCP(AF_INET6), IOError);
@@ -607,41 +676,43 @@ TEST_F(RecursiveQueryTest, forwarderSend) {
}
int
-createTestSocket()
-{
- struct addrinfo* res_ = resolveAddress(AF_INET, IPPROTO_UDP, true);
- int sock_ = socket(res_->ai_family, res_->ai_socktype, res_->ai_protocol);
- if (sock_ < 0) {
+createTestSocket() {
+ ScopedAddrInfo sai(resolveAddress(AF_INET, IPPROTO_UDP, true));
+ struct addrinfo* res = sai.res_;
+
+ ScopedSocket sock(socket(res->ai_family, res->ai_socktype,
+ res->ai_protocol));
+ if (sock.s_ < 0) {
isc_throw(IOError, "failed to open test socket");
}
- if (bind(sock_, res_->ai_addr, res_->ai_addrlen) < 0) {
+ if (bind(sock.s_, res->ai_addr, res->ai_addrlen) < 0) {
isc_throw(IOError, "failed to bind test socket");
}
- return sock_;
+ return (sock.release());
}
int
-setSocketTimeout(int sock_, size_t tv_sec, size_t tv_usec) {
+setSocketTimeout(int sock, size_t tv_sec, size_t tv_usec) {
const struct timeval timeo = { tv_sec, tv_usec };
int recv_options = 0;
- if (setsockopt(sock_, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo))) {
+ if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo))) {
if (errno == ENOPROTOOPT) { // see RecursiveQueryTest::recvUDP()
recv_options = MSG_DONTWAIT;
} else {
isc_throw(IOError, "set RCVTIMEO failed: " << strerror(errno));
}
}
- return recv_options;
+ return (recv_options);
}
// try to read from the socket max times
// *num is incremented for every successful read
// returns true if it can read max times, false otherwise
-bool tryRead(int sock_, int recv_options, size_t max, int* num) {
+bool tryRead(int sock, int recv_options, size_t max, int* num) {
size_t i = 0;
do {
char inbuff[512];
- if (recv(sock_, inbuff, sizeof(inbuff), recv_options) < 0) {
+ if (recv(sock, inbuff, sizeof(inbuff), recv_options) < 0) {
return false;
} else {
++i;
@@ -691,7 +762,7 @@ TEST_F(RecursiveQueryTest, forwardQueryTimeout) {
setDNSService();
// Prepare the socket
- sock_ = createTestSocket();
+ sock_.reset(createTestSocket());
// Prepare the server
bool done(true);
@@ -725,7 +796,7 @@ TEST_F(RecursiveQueryTest, forwardClientTimeout) {
// Prepare the service (we do not use the common setup, we do not answer
setDNSService();
- sock_ = createTestSocket();
+ sock_.reset(createTestSocket());
// Prepare the server
bool done1(true);
@@ -759,7 +830,7 @@ TEST_F(RecursiveQueryTest, forwardLookupTimeout) {
setDNSService();
// Prepare the socket
- sock_ = createTestSocket();
+ sock_.reset(createTestSocket());
// Prepare the server
bool done(true);
@@ -794,7 +865,7 @@ TEST_F(RecursiveQueryTest, lowtimeouts) {
setDNSService();
// Prepare the socket
- sock_ = createTestSocket();
+ sock_.reset(createTestSocket());
// Prepare the server
bool done(true);
diff --git a/src/lib/testutils/testdata/Makefile.am b/src/lib/testutils/testdata/Makefile.am
index a6b8206..b9ef53f 100644
--- a/src/lib/testutils/testdata/Makefile.am
+++ b/src/lib/testutils/testdata/Makefile.am
@@ -27,6 +27,7 @@ EXTRA_DIST += rfc5155-example.zone.signed
EXTRA_DIST += example.com
EXTRA_DIST += example.sqlite3
+EXTRA_DIST += rwtest.sqlite3 # SQLite3 DB file as a template data source
EXTRA_DIST += test1.zone.in
EXTRA_DIST += test1-new.zone.in
diff --git a/src/lib/testutils/testdata/auth_test.sqlite3 b/src/lib/testutils/testdata/auth_test.sqlite3
new file mode 100755
index 0000000..5eeb2c3
Binary files /dev/null and b/src/lib/testutils/testdata/auth_test.sqlite3 differ
diff --git a/src/lib/testutils/testdata/auth_test.sqlite3.copied b/src/lib/testutils/testdata/auth_test.sqlite3.copied
new file mode 100755
index 0000000..205e4ef
Binary files /dev/null and b/src/lib/testutils/testdata/auth_test.sqlite3.copied differ
diff --git a/src/lib/testutils/testdata/example.sqlite3 b/src/lib/testutils/testdata/example.sqlite3
index e8e255b..0f6ee02 100644
Binary files a/src/lib/testutils/testdata/example.sqlite3 and b/src/lib/testutils/testdata/example.sqlite3 differ
diff --git a/src/lib/testutils/testdata/rwtest.sqlite3 b/src/lib/testutils/testdata/rwtest.sqlite3
new file mode 100644
index 0000000..5eeb2c3
Binary files /dev/null and b/src/lib/testutils/testdata/rwtest.sqlite3 differ
diff --git a/tests/lettuce/.gitignore b/tests/lettuce/.gitignore
index e389f47..f41154c 100644
--- a/tests/lettuce/.gitignore
+++ b/tests/lettuce/.gitignore
@@ -1 +1,2 @@
+/output/
/setup_intree_bind10.sh
diff --git a/tests/lettuce/configurations/.gitignore b/tests/lettuce/configurations/.gitignore
new file mode 100644
index 0000000..69d136f
--- /dev/null
+++ b/tests/lettuce/configurations/.gitignore
@@ -0,0 +1,2 @@
+/bindctl_commands.config
+/example.org.config
diff --git a/tests/lettuce/configurations/bindctl_commands.config.orig b/tests/lettuce/configurations/bindctl_commands.config.orig
new file mode 100644
index 0000000..d74b96e
--- /dev/null
+++ b/tests/lettuce/configurations/bindctl_commands.config.orig
@@ -0,0 +1,34 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "*"
+ } ]
+ },
+ "Auth": {
+ "database_file": "data/example.org.sqlite3",
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "StatsHttpd": {
+ "listen_on": [ {
+ "port": 47811,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "dispensable", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-stats-httpd": { "address": "StatsHttpd", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/default.config b/tests/lettuce/configurations/default.config
new file mode 100644
index 0000000..9e1d3d1
--- /dev/null
+++ b/tests/lettuce/configurations/default.config
@@ -0,0 +1,16 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "*"
+ } ]
+ },
+ "StatsHttpd": {
+ "listen_on": [ {
+ "port": 47811,
+ "address": "127.0.0.1"
+ } ]
+ }
+}
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
index 642f2dd..fadb3e2 100644
--- a/tests/lettuce/configurations/example.org.config.orig
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -4,7 +4,7 @@
"loggers": [ {
"debuglevel": 99,
"severity": "DEBUG",
- "name": "auth"
+ "name": "*"
} ]
},
"Auth": {
@@ -13,5 +13,11 @@
"port": 47806,
"address": "127.0.0.1"
} ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/lettuce/configurations/example.org.inmem.config b/tests/lettuce/configurations/example.org.inmem.config
index fa5f954..6418c65 100644
--- a/tests/lettuce/configurations/example.org.inmem.config
+++ b/tests/lettuce/configurations/example.org.inmem.config
@@ -1 +1,8 @@
-{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "auth", "debuglevel": 99}]}, "Auth": {"database_file": "", "listen_on": [{"port": 47806, "address": "127.0.0.1"}], "datasources": [{"zones": [{"origin": "example.org", "file": "data/example.org"}], "type": "memory"}]}}
+{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Auth": {"database_file": "", "listen_on": [{"port": 47806, "address": "127.0.0.1"}], "datasources": [{"zones": [{"origin": "example.org", "file": "data/example.org"}], "type": "memory"}]},
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
index 1a40d1b..25314dc 100644
--- a/tests/lettuce/configurations/example2.org.config
+++ b/tests/lettuce/configurations/example2.org.config
@@ -3,7 +3,7 @@
"Logging": {
"loggers": [ {
"severity": "DEBUG",
- "name": "auth",
+ "name": "*",
"debuglevel": 99
}
]
@@ -12,7 +12,13 @@
"database_file": "data/example.org.sqlite3",
"listen_on": [ {
"port": 47807,
- "address": "127.0.0.1"
+ "address": "::1"
} ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
new file mode 100644
index 0000000..a104726
--- /dev/null
+++ b/tests/lettuce/configurations/inmemory_over_sqlite3/secondary.conf
@@ -0,0 +1,32 @@
+{
+ "version": 2,
+ "Logging": {
+ "loggers": [ {
+ "debuglevel": 99,
+ "severity": "DEBUG",
+ "name": "auth"
+ } ]
+ },
+ "Auth": {
+ "datasources": [ {
+ "type": "memory",
+ "zones": [ {
+ "origin": "example.org",
+ "file": "data/example.org.sqlite3",
+ "filetype": "sqlite3"
+ } ]
+ } ],
+ "listen_on": [ {
+ "port": 47806,
+ "address": "127.0.0.1"
+ } ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/ixfr-out/testset1-config.db b/tests/lettuce/configurations/ixfr-out/testset1-config.db
index c5fc165..1c1b990 100644
--- a/tests/lettuce/configurations/ixfr-out/testset1-config.db
+++ b/tests/lettuce/configurations/ixfr-out/testset1-config.db
@@ -1 +1,11 @@
-{"Xfrin": {"zones": [{"use_ixfr": true, "class": "IN", "name": "example.com.", "master_addr": "178.18.82.80"}]}, "version": 2, "Logging": {"loggers": [{"debuglevel": 99, "severity": "DEBUG", "output_options": [{"output": "stderr", "flush": true}], "name": "*"}]}, "Auth": {"database_file": "data/ixfr-out/zones.slite3", "listen_on": [{"port": 47806, "address": "::"}, {"port": 47806, "address": "0.0.0.0"}]}}
+{"Xfrin": {"zones": [{"use_ixfr": true, "class": "IN", "name": "example.com.", "master_addr": "178.18.82.80"}]}, "version": 2, "Logging": {"loggers": [{"debuglevel": 99, "severity": "DEBUG", "output_options": [{"output": "stderr", "flush": true}], "name": "*"}]}, "Auth": {"database_file": "data/ixfr-out/zones.sqlite3", "listen_on": [{"port": 47806, "address": "::"}, {"port": 47806, "address": "0.0.0.0"}]},
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
+ }
+}
diff --git a/tests/lettuce/configurations/multi_instance/.gitignore b/tests/lettuce/configurations/multi_instance/.gitignore
new file mode 100644
index 0000000..9509290
--- /dev/null
+++ b/tests/lettuce/configurations/multi_instance/.gitignore
@@ -0,0 +1 @@
+/multi_auth.config
diff --git a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
index f181d42..53c2b7a 100644
--- a/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
+++ b/tests/lettuce/configurations/multi_instance/multi_auth.config.orig
@@ -1 +1 @@
-{"version": 2, "Auth": {"listen_on": [{"port": 47806, "address": "0.0.0.0"}]}, "Boss": {"components": {"b10-auth-2": {"kind": "needed", "special": "auth"}, "b10-auth": {"kind": "needed", "special": "auth"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
+{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Auth": {"listen_on": [{"port": 47806, "address": "0.0.0.0"}]}, "Boss": {"components": {"b10-auth-2": {"kind": "dispensable", "special": "auth"}, "b10-auth": {"kind": "dispensable", "special": "auth"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
index f865354..fc0a25d 100644
--- a/tests/lettuce/configurations/no_db_file.config
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -1,10 +1,24 @@
{
"version": 2,
+ "Logging": {
+ "loggers": [ {
+ "severity": "DEBUG",
+ "name": "*",
+ "debuglevel": 99
+ }
+ ]
+ },
"Auth": {
"database_file": "data/test_nonexistent_db.sqlite3",
"listen_on": [ {
"port": 47806,
"address": "127.0.0.1"
} ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/lettuce/configurations/nsec3/nsec3_auth.config b/tests/lettuce/configurations/nsec3/nsec3_auth.config
index 2de5284..94514c0 100644
--- a/tests/lettuce/configurations/nsec3/nsec3_auth.config
+++ b/tests/lettuce/configurations/nsec3/nsec3_auth.config
@@ -1 +1 @@
-{"version": 2, "Auth": {"datasources": [{"zones": [{"origin": "example.", "file": "configurations/nsec3/rfc5155-example.zone.signed"}], "type": "memory"}], "listen_on": [{"port": 47806, "address": "0.0.0.0"}]}, "Boss": {"components": {"b10-auth": {"kind": "needed", "special": "auth"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
+{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Auth": {"datasources": [{"zones": [{"origin": "example.", "file": "configurations/nsec3/rfc5155-example.zone.signed"}], "type": "memory"}], "listen_on": [{"port": 47806, "address": "0.0.0.0"}]}, "Boss": {"components": {"b10-auth": {"kind": "needed", "special": "auth"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
diff --git a/tests/lettuce/configurations/resolver/.gitignore b/tests/lettuce/configurations/resolver/.gitignore
new file mode 100644
index 0000000..8d60553
--- /dev/null
+++ b/tests/lettuce/configurations/resolver/.gitignore
@@ -0,0 +1 @@
+/resolver_basic.config
diff --git a/tests/lettuce/configurations/resolver/resolver_basic.config.orig b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
index 5664c20..0adca9f 100644
--- a/tests/lettuce/configurations/resolver/resolver_basic.config.orig
+++ b/tests/lettuce/configurations/resolver/resolver_basic.config.orig
@@ -1 +1 @@
-{"version": 2, "Resolver": {"query_acl": [{"action": "REJECT", "from": "127.0.0.1"}], "listen_on": [{"port": 47806, "address": "127.0.0.1"}]}, "Boss": {"components": {"b10-resolver": {"kind": "needed"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
+{"version": 2, "Logging": {"loggers": [{"severity": "DEBUG", "name": "*", "debuglevel": 99}]}, "Resolver": {"query_acl": [{"action": "REJECT", "from": "127.0.0.1"}], "listen_on": [{"port": 47806, "address": "127.0.0.1"}]}, "Boss": {"components": {"b10-resolver": {"kind": "needed"}, "b10-cmdctl": {"kind": "needed", "special": "cmdctl"}}}}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master.conf b/tests/lettuce/configurations/xfrin/retransfer_master.conf
index 95cd88e..eae47a6 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_master.conf
+++ b/tests/lettuce/configurations/xfrin/retransfer_master.conf
@@ -4,19 +4,27 @@
"loggers": [ {
"debuglevel": 99,
"severity": "DEBUG",
- "name": "auth"
+ "name": "*"
} ]
},
"Auth": {
"database_file": "data/example.org.sqlite3",
"listen_on": [ {
"port": 47807,
- "address": "127.0.0.1"
+ "address": "::1"
} ]
},
"Xfrout": {
"zone_config": [ {
"origin": "example.org"
} ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave.conf b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
index 51622cd..2296b8f 100644
--- a/tests/lettuce/configurations/xfrin/retransfer_slave.conf
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
@@ -4,7 +4,7 @@
"loggers": [ {
"debuglevel": 99,
"severity": "DEBUG",
- "name": "auth"
+ "name": "*"
} ]
},
"Auth": {
@@ -13,5 +13,13 @@
"port": 47806,
"address": "127.0.0.1"
} ]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": { "kind": "needed", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/lettuce/data/.gitignore b/tests/lettuce/data/.gitignore
new file mode 100644
index 0000000..299b6d2
--- /dev/null
+++ b/tests/lettuce/data/.gitignore
@@ -0,0 +1 @@
+/test_nonexistent_db.sqlite3
diff --git a/tests/lettuce/data/empty_db.sqlite3 b/tests/lettuce/data/empty_db.sqlite3
index f27a8b8..c434e30 100644
Binary files a/tests/lettuce/data/empty_db.sqlite3 and b/tests/lettuce/data/empty_db.sqlite3 differ
diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3
index 070012f..715a092 100644
Binary files a/tests/lettuce/data/example.org.sqlite3 and b/tests/lettuce/data/example.org.sqlite3 differ
diff --git a/tests/lettuce/data/ixfr-out/.gitignore b/tests/lettuce/data/ixfr-out/.gitignore
new file mode 100644
index 0000000..f8de78e
--- /dev/null
+++ b/tests/lettuce/data/ixfr-out/.gitignore
@@ -0,0 +1 @@
+/zones.sqlite3
diff --git a/tests/lettuce/data/ixfr-out/zones.slite3 b/tests/lettuce/data/ixfr-out/zones.slite3
deleted file mode 100644
index a2b2dbd..0000000
Binary files a/tests/lettuce/data/ixfr-out/zones.slite3 and /dev/null differ
diff --git a/tests/lettuce/data/ixfr-out/zones.sqlite3 b/tests/lettuce/data/ixfr-out/zones.sqlite3
new file mode 100644
index 0000000..311d335
Binary files /dev/null and b/tests/lettuce/data/ixfr-out/zones.sqlite3 differ
diff --git a/tests/lettuce/features/bindctl_commands.feature b/tests/lettuce/features/bindctl_commands.feature
index 872064f..03a04bf 100644
--- a/tests/lettuce/features/bindctl_commands.feature
+++ b/tests/lettuce/features/bindctl_commands.feature
@@ -8,7 +8,15 @@ Feature: control with bindctl
# modules (note that it 'misuses' the help command for this;
# there is a Boss command 'show_processes', but its output is
# currently less standardized than 'help')
- Given I have bind10 running with configuration example.org.config
+ Given I have bind10 running with configuration bindctl_commands.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message ZONEMGR_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+ And wait for bind10 stderr message XFRIN_STARTED
+ And wait for bind10 stderr message XFROUT_STARTED
+ And wait for bind10 stderr message STATS_STARTING
+ And wait for bind10 stderr message STATHTTPD_STARTED
Then remove bind10 configuration Boss/components/NOSUCHMODULE
last bindctl output should contain Error
@@ -19,20 +27,39 @@ Feature: control with bindctl
bind10 module Xfrin should be running
bind10 module Auth should be running
bind10 module StatsHttpd should be running
+ bind10 module Resolver should not be running
Then remove bind10 configuration Boss/components value b10-xfrout
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
last bindctl output should not contain Error
+
# assuming it won't error for further modules (if it does, the final
# 'should not be running' tests would fail anyway)
+ Then remove bind10 configuration Boss/components value b10-stats-httpd
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
+ last bindctl output should not contain Error
+
Then remove bind10 configuration Boss/components value b10-stats
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
+ last bindctl output should not contain Error
+
Then remove bind10 configuration Boss/components value b10-zonemgr
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
+ last bindctl output should not contain Error
+
Then remove bind10 configuration Boss/components value b10-xfrin
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
+ last bindctl output should not contain Error
+
Then remove bind10 configuration Boss/components value b10-auth
- Then remove bind10 configuration Boss/components value b10-stats-httpd
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
+ last bindctl output should not contain Error
+ # After these ^^^ have been stopped...
bind10 module Xfrout should not be running
- bind10 module Stats should not be running
bind10 module Zonemgr should not be running
bind10 module Xfrin should not be running
bind10 module Auth should not be running
bind10 module StatsHttpd should not be running
+ bind10 module Stats should not be running
+ bind10 module Resolver should not be running
diff --git a/tests/lettuce/features/default.feature b/tests/lettuce/features/default.feature
new file mode 100644
index 0000000..daace57
--- /dev/null
+++ b/tests/lettuce/features/default.feature
@@ -0,0 +1,22 @@
+Feature: default bind10 config
+ Tests for the default configuration of bind10.
+
+ Scenario: Check that only the default components are running
+ Given I have bind10 running with configuration default.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message STATS_STARTING
+ And wait for bind10 stderr message STATHTTPD_STARTED
+
+ # These should be running
+ bind10 module Boss should be running
+ And bind10 module Logging should be running
+ And bind10 module Stats should be running
+ And bind10 module StatsHttpd should be running
+
+ # These should not be running
+ bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Auth should not be running
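The new default.feature, like the other feature changes on this branch, leans
on the "wait for bind10 stderr message ..." step to synchronize with process
startup instead of using fixed sleeps. A simplified sketch of how such a
lettuce step could be written — the attribute used to locate the captured
stderr output is an assumption for illustration, not the harness's real API:

    import time
    from lettuce import step, world

    @step(r'wait for bind10 stderr message (\S+)')
    def wait_for_stderr_message(step, message):
        # Poll the file that bind10's stderr is redirected into (the path is
        # held in world.bind10_stderr_path here purely as an assumed attribute).
        deadline = time.time() + 10
        while time.time() < deadline:
            with open(world.bind10_stderr_path) as f:
                if message in f.read():
                    return
            time.sleep(0.5)
        assert False, "message %s not seen in bind10 stderr" % message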
diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature
index d1ed6b3..ca5ffbf 100644
--- a/tests/lettuce/features/example.feature
+++ b/tests/lettuce/features/example.feature
@@ -8,6 +8,18 @@ Feature: Example feature
Scenario: A simple example
Given I have bind10 running with configuration example.org.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A query for www.example.org should have rcode NOERROR
A query for www.doesnotexist.org should have rcode REFUSED
The SOA serial for example.org should be 1234
@@ -26,8 +38,18 @@ Feature: Example feature
# is actually a compound step consisting of the following two
# one to start the server
When I start bind10 with configuration no_db_file.config
- # And one to wait until it reports that b10-auth has started
- Then wait for bind10 auth to start
+
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
# This is a general step to stop a named process. By convention,
# the default name for any process is the same as the one we
@@ -50,6 +72,17 @@ Feature: Example feature
# This is a compound statement that starts and waits for the
# started message
Given I have bind10 running with configuration example.org.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
# Some simple queries that are not examined further
A query for www.example.com should have rcode REFUSED
@@ -113,8 +146,18 @@ Feature: Example feature
# the system
When I start bind10 with configuration example.org.config
- Then wait for bind10 auth to start
- Wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A query for www.example.org should have rcode NOERROR
Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
@@ -128,15 +171,20 @@ Feature: Example feature
Scenario: two bind10 instances
# This is more a test of the test system: start 2 bind10 instances
When I start bind10 with configuration example.org.config as bind10_one
+ And wait for bind10_one stderr message BIND10_STARTED_CC
+ And wait for bind10_one stderr message CMDCTL_STARTED
+ And wait for bind10_one stderr message AUTH_SERVER_STARTED
+
And I start bind10 with configuration example2.org.config with cmdctl port 47804 as bind10_two
+ And wait for bind10_two stderr message BIND10_STARTED_CC
+ And wait for bind10_two stderr message CMDCTL_STARTED
+ And wait for bind10_two stderr message AUTH_SERVER_STARTED
- Then wait for bind10 auth of bind10_one to start
- Then wait for bind10 auth of bind10_two to start
A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
- A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+ A query for www.example.org to [::1]:47807 should have rcode NOERROR
Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
And wait for bind10_one stderr message DATASRC_SQLITE_OPEN
A query for www.example.org to 127.0.0.1:47806 should have rcode REFUSED
- A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+ A query for www.example.org to [::1]:47807 should have rcode NOERROR
diff --git a/tests/lettuce/features/inmemory_over_sqlite3.feature b/tests/lettuce/features/inmemory_over_sqlite3.feature
new file mode 100644
index 0000000..60945c8
--- /dev/null
+++ b/tests/lettuce/features/inmemory_over_sqlite3.feature
@@ -0,0 +1,9 @@
+Feature: In-memory zone using SQLite3 backend
+ This feature tests the authoritative server configured with an in-memory
+ data source that uses the SQLite3 data source as the backend, and tests
+ scenarios that update the zone via incoming zone transfers.
+
+ Scenario: Load and response
+ Given I have bind10 running with configuration inmemory_over_sqlite3/secondary.conf
+ A query for www.example.org should have rcode NOERROR
+ The SOA serial for example.org should be 1234
diff --git a/tests/lettuce/features/ixfr_out_bind10.feature b/tests/lettuce/features/ixfr_out_bind10.feature
index e84ad8c..24a9299 100644
--- a/tests/lettuce/features/ixfr_out_bind10.feature
+++ b/tests/lettuce/features/ixfr_out_bind10.feature
@@ -31,7 +31,14 @@ Feature: IXFR out
Scenario: Test Set 1
Given I have bind10 running with configuration ixfr-out/testset1-config.db
- Then wait for bind10 xfrout to start
+
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+ And wait for bind10 stderr message XFROUT_STARTED
+ And wait for bind10 stderr message XFRIN_STARTED
+ And wait for bind10 stderr message ZONEMGR_STARTED
+
The SOA serial for example.com should be 22
#
@@ -146,7 +153,14 @@ Feature: IXFR out
Scenario: Test Set 2
Given I have bind10 running with configuration ixfr-out/testset1-config.db
- Then wait for bind10 xfrout to start
+
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+ And wait for bind10 stderr message XFROUT_STARTED
+ And wait for bind10 stderr message XFRIN_STARTED
+ And wait for bind10 stderr message ZONEMGR_STARTED
+
The SOA serial for example.com should be 22
#
diff --git a/tests/lettuce/features/multi_instance.feature b/tests/lettuce/features/multi_instance.feature
index 864431d..048e60e 100644
--- a/tests/lettuce/features/multi_instance.feature
+++ b/tests/lettuce/features/multi_instance.feature
@@ -5,7 +5,22 @@ Feature: Multiple instances
Scenario: Multiple instances of Auth
# This config should have two running instances
Given I have bind10 running with configuration multi_instance/multi_auth.config
- And bind10 module Auth should be running
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+
+ # This is a hack. We should actually check if b10-auth and
+ # b10-auth-2 are started by name. But there's currently no way
+ # for a component to find out its name and log it.
+ And wait 2 times for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A query for example.com should have rcode REFUSED
# this also checks whether the process is running
@@ -13,6 +28,7 @@ Feature: Multiple instances
And remember the pid of process b10-auth-2
When I remove bind10 configuration Boss/components value b10-auth-2
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth should not have changed
And a query for example.com should have rcode REFUSED
@@ -31,5 +47,6 @@ Feature: Multiple instances
A query for example.com should have rcode REFUSED
When I remove bind10 configuration Boss/components value b10-auth
+ And wait for new bind10 stderr message BIND10_PROCESS_ENDED
Then the pid of process b10-auth-2 should not have changed
A query for example.com should have rcode REFUSED
diff --git a/tests/lettuce/features/nsec3_auth.feature b/tests/lettuce/features/nsec3_auth.feature
index 0fbf219..4e5ed5b 100644
--- a/tests/lettuce/features/nsec3_auth.feature
+++ b/tests/lettuce/features/nsec3_auth.feature
@@ -12,6 +12,18 @@ Feature: NSEC3 Authoritative service
Scenario: B.1. Name Error
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for a.c.x.w.example. should have rcode NXDOMAIN
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -20,18 +32,30 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
- b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN NSEC3 1 1 12 aabbccdd gjeqe526plbf1g8mklp59enfd789njgi MX RRSIG
- b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. ZkPG3M32lmoHM6pa3D6gZFGB/rhL//Bs3Omh5u4m/CUiwtblEVOaAKKZ d7S959OeiX43aLX3pOv0TSTyiTxIZg==
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
+ b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN NSEC3 1 1 12 aabbccdd gjeqe526plbf1g8mklp59enfd789njgi MX RRSIG
+ b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. ZkPG3M32lmoHM6pa3D6gZFGB/rhL//Bs3Omh5u4m/CUiwtblEVOaAKKZ d7S959OeiX43aLX3pOv0TSTyiTxIZg==
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
"""
Scenario: B.2. No Data Error
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for ns1.example. type MX should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -40,14 +64,26 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- 2t7b4g4vsa5smi47k61mv5bv1a22bojr.example. 3600 IN NSEC3 1 1 12 aabbccdd 2vptu5timamqttgl4luu9kg21e0aor3s A RRSIG
- 2t7b4g4vsa5smi47k61mv5bv1a22bojr.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OmBvJ1Vgg1hCKMXHFiNeIYHK9XVW0iLDLwJN4TFoNxZuP03gAXEI634Y wOc4YBNITrj413iqNI6mRk/r1dOSUw==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ 2t7b4g4vsa5smi47k61mv5bv1a22bojr.example. 3600 IN NSEC3 1 1 12 aabbccdd 2vptu5timamqttgl4luu9kg21e0aor3s A RRSIG
+ 2t7b4g4vsa5smi47k61mv5bv1a22bojr.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OmBvJ1Vgg1hCKMXHFiNeIYHK9XVW0iLDLwJN4TFoNxZuP03gAXEI634Y wOc4YBNITrj413iqNI6mRk/r1dOSUw==
"""
Scenario: B2.1. No Data Error, Empty Non-Terminal
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for y.w.example. should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -56,14 +92,26 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. 3600 IN NSEC3 1 1 12 aabbccdd k8udemvp1j2f7eg6jebps17vp3n8i58h
- ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. gPkFp1s2QDQ6wQzcg1uSebZ61W33rUBDcTj72F3kQ490fEdp7k1BUIfb cZtPbX3YCpE+sIt0MpzVSKfTwx4uYA==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. 3600 IN NSEC3 1 1 12 aabbccdd k8udemvp1j2f7eg6jebps17vp3n8i58h
+ ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. gPkFp1s2QDQ6wQzcg1uSebZ61W33rUBDcTj72F3kQ490fEdp7k1BUIfb cZtPbX3YCpE+sIt0MpzVSKfTwx4uYA==
"""
Scenario: B.3. Referral to an Opt-Out Unsigned Zone
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for mc.c.example. type MX should have rcode NOERROR
The last query response should have flags qr rd
The last query response should have edns_flags do
@@ -72,12 +120,12 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 3
The authority section of the last query response should be
"""
- c.example. 3600 IN NS ns1.c.example.
- c.example. 3600 IN NS ns2.c.example.
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
+ c.example. 3600 IN NS ns1.c.example.
+ c.example. 3600 IN NS ns2.c.example.
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
"""
The additional section of the last query response should be
"""
@@ -87,6 +135,18 @@ Feature: NSEC3 Authoritative service
Scenario: B.4. Wildcard Expansion
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for a.z.w.example. type MX should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -95,33 +155,45 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 9
The answer section of the last query response should be
"""
- a.z.w.example. 3600 IN MX 1 ai.example.
- a.z.w.example. 3600 IN RRSIG MX 7 2 3600 20150420235959 20051021000000 40430 example. CikebjQwGQPwijVcxgcZcSJKtfynugtlBiKb9FcBTrmOoyQ4InoWVudh CWsh/URX3lc4WRUMivEBP6+4KS3ldA==
+ a.z.w.example. 3600 IN MX 1 ai.example.
+ a.z.w.example. 3600 IN RRSIG MX 7 2 3600 20150420235959 20051021000000 40430 example. CikebjQwGQPwijVcxgcZcSJKtfynugtlBiKb9FcBTrmOoyQ4InoWVudh CWsh/URX3lc4WRUMivEBP6+4KS3ldA==
"""
The authority section of the last query response should be
"""
- example. 3600 IN NS ns1.example.
- example. 3600 IN NS ns2.example.
- example. 3600 IN RRSIG NS 7 1 3600 20150420235959 20051021000000 40430 example. PVOgtMK1HHeSTau+HwDWC8Ts+6C8qtqd4pQJqOtdEVgg+MA+ai4fWDEh u3qHJyLcQ9tbD2vvCnMXjtz6SyObxA==
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 aabbccdd r53bq7cc2uvmubfu5ocmm6pers9tk9en A RRSIG
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
+ example. 3600 IN NS ns1.example.
+ example. 3600 IN NS ns2.example.
+ example. 3600 IN RRSIG NS 7 1 3600 20150420235959 20051021000000 40430 example. PVOgtMK1HHeSTau+HwDWC8Ts+6C8qtqd4pQJqOtdEVgg+MA+ai4fWDEh u3qHJyLcQ9tbD2vvCnMXjtz6SyObxA==
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 aabbccdd r53bq7cc2uvmubfu5ocmm6pers9tk9en A RRSIG
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
"""
# This is slightly different from the example in RFC5155; there are
# more RRs in the additional section.
The additional section of the last query response should be
"""
- ai.example. 3600 IN A 192.0.2.9
- ai.example. 3600 IN AAAA 2001:db8::f00:baa9
- ns1.example. 3600 IN A 192.0.2.1
- ns2.example. 3600 IN A 192.0.2.2
- ai.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. hVe+wKYMlObTRPhX0NL67GxeZfdxqr/QeR6FtfdAj5+FgYxyzPEjIzvK Wy00hWIl6wD3Vws+rznEn8sQ64UdqA==
- ai.example. 3600 IN RRSIG AAAA 7 2 3600 20150420235959 20051021000000 40430 example. LcdxKaCB5bGZwPDg+3JJ4O02zoMBrjxqlf6WuaHQZZfTUpb9Nf2nxFGe 2XRPfR5tpJT6GdRGcHueLuXkMjBArQ==
- ns1.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. bu6kx73n6XEunoVGuRfAgY7EF/AJqHy7hj0jkiqJjB0dOrx3wuz9SaBe GfqWIdn/uta3SavN4FRvZR9SCFHF5Q==
- ns2.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. ktQ3TqE0CfRfki0Rb/Ip5BM0VnxelbuejCC4zpLbFKA/7eD7UNAwxMgx JPtbdST+syjYSJaj4IHfeX6n8vfoGA==
+ ai.example. 3600 IN A 192.0.2.9
+ ai.example. 3600 IN AAAA 2001:db8::f00:baa9
+ ns1.example. 3600 IN A 192.0.2.1
+ ns2.example. 3600 IN A 192.0.2.2
+ ai.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. hVe+wKYMlObTRPhX0NL67GxeZfdxqr/QeR6FtfdAj5+FgYxyzPEjIzvK Wy00hWIl6wD3Vws+rznEn8sQ64UdqA==
+ ai.example. 3600 IN RRSIG AAAA 7 2 3600 20150420235959 20051021000000 40430 example. LcdxKaCB5bGZwPDg+3JJ4O02zoMBrjxqlf6WuaHQZZfTUpb9Nf2nxFGe 2XRPfR5tpJT6GdRGcHueLuXkMjBArQ==
+ ns1.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. bu6kx73n6XEunoVGuRfAgY7EF/AJqHy7hj0jkiqJjB0dOrx3wuz9SaBe GfqWIdn/uta3SavN4FRvZR9SCFHF5Q==
+ ns2.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. ktQ3TqE0CfRfki0Rb/Ip5BM0VnxelbuejCC4zpLbFKA/7eD7UNAwxMgx JPtbdST+syjYSJaj4IHfeX6n8vfoGA==
"""
Scenario: B.5. Wildcard No Data Error
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for a.z.w.example. type AAAA should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -130,18 +202,30 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN NSEC3 1 1 12 aabbccdd kohar7mbb8dc2ce8a9qvl8hon4k53uhi
- k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. FtXGbvF0+wf8iWkyo73enAuVx03klN+pILBKS6qCcftVtfH4yVzsEZqu J27NHR7ruxJWDNMtOtx7w9WfcIg62A==
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 aabbccdd r53bq7cc2uvmubfu5ocmm6pers9tk9en A RRSIG
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 aabbccdd t644ebqk9bibcna874givr6joj62mlhv MX RRSIG
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN NSEC3 1 1 12 aabbccdd kohar7mbb8dc2ce8a9qvl8hon4k53uhi
+ k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. FtXGbvF0+wf8iWkyo73enAuVx03klN+pILBKS6qCcftVtfH4yVzsEZqu J27NHR7ruxJWDNMtOtx7w9WfcIg62A==
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 aabbccdd r53bq7cc2uvmubfu5ocmm6pers9tk9en A RRSIG
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 aabbccdd t644ebqk9bibcna874givr6joj62mlhv MX RRSIG
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
"""
Scenario: B.6. DS Child Zone No Data Error
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for example. type DS should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -150,10 +234,10 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 aabbccdd 2t7b4g4vsa5smi47k61mv5bv1a22bojr NS SOA MX RRSIG DNSKEY NSEC3PARAM
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
"""
#
@@ -162,6 +246,18 @@ Feature: NSEC3 Authoritative service
Scenario: 7.2.2 other; Name Error where one NSEC3 covers multiple parts of proof (closest encloser)
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for b.x.w.example. should have rcode NXDOMAIN
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -170,16 +266,28 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN NSEC3 1 1 12 aabbccdd gjeqe526plbf1g8mklp59enfd789njgi MX RRSIG
- b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. ZkPG3M32lmoHM6pa3D6gZFGB/rhL//Bs3Omh5u4m/CUiwtblEVOaAKKZ d7S959OeiX43aLX3pOv0TSTyiTxIZg==
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN NSEC3 1 1 12 aabbccdd gjeqe526plbf1g8mklp59enfd789njgi MX RRSIG
+ b4um86eghhds6nea196smvmlo4ors995.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. ZkPG3M32lmoHM6pa3D6gZFGB/rhL//Bs3Omh5u4m/CUiwtblEVOaAKKZ d7S959OeiX43aLX3pOv0TSTyiTxIZg==
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 aabbccdd b4um86eghhds6nea196smvmlo4ors995 NS DS RRSIG
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
"""
Scenario: 7.2.2 other; Name Error where one NSEC3 covers multiple parts of proof (wildcard)
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for a.w.example. should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -188,16 +296,28 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN NSEC3 1 1 12 AABBCCDD KOHAR7MBB8DC2CE8A9QVL8HON4K53UHI
- k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. FtXGbvF0+wf8iWkyo73enAuVx03klN+pILBKS6qCcftVtfH4yVzsEZqu J27NHR7ruxJWDNMtOtx7w9WfcIg62A==
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 AABBCCDD T644EBQK9BIBCNA874GIVR6JOJ62MLHV MX RRSIG
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN NSEC3 1 1 12 AABBCCDD KOHAR7MBB8DC2CE8A9QVL8HON4K53UHI
+ k8udemvp1j2f7eg6jebps17vp3n8i58h.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. FtXGbvF0+wf8iWkyo73enAuVx03klN+pILBKS6qCcftVtfH4yVzsEZqu J27NHR7ruxJWDNMtOtx7w9WfcIg62A==
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 AABBCCDD T644EBQK9BIBCNA874GIVR6JOJ62MLHV MX RRSIG
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
"""
Scenario: Wildcard other: Wildcard name itself
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for *.w.example. type MX should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -206,29 +326,41 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 9
The answer section of the last query response should be
"""
- *.w.example. 3600 IN MX 1 ai.example.
- *.w.example. 3600 IN RRSIG MX 7 2 3600 20150420235959 20051021000000 40430 example. CikebjQwGQPwijVcxgcZcSJKtfynugtlBiKb9FcBTrmOoyQ4InoWVudh CWsh/URX3lc4WRUMivEBP6+4KS3ldA==
+ *.w.example. 3600 IN MX 1 ai.example.
+ *.w.example. 3600 IN RRSIG MX 7 2 3600 20150420235959 20051021000000 40430 example. CikebjQwGQPwijVcxgcZcSJKtfynugtlBiKb9FcBTrmOoyQ4InoWVudh CWsh/URX3lc4WRUMivEBP6+4KS3ldA==
"""
The authority section of the last query response should be
"""
- example. 3600 IN NS ns1.example.
- example. 3600 IN NS ns2.example.
- example. 3600 IN RRSIG NS 7 1 3600 20150420235959 20051021000000 40430 example. PVOgtMK1HHeSTau+HwDWC8Ts+6C8qtqd4pQJqOtdEVgg+MA+ai4fWDEh u3qHJyLcQ9tbD2vvCnMXjtz6SyObxA==
+ example. 3600 IN NS ns1.example.
+ example. 3600 IN NS ns2.example.
+ example. 3600 IN RRSIG NS 7 1 3600 20150420235959 20051021000000 40430 example. PVOgtMK1HHeSTau+HwDWC8Ts+6C8qtqd4pQJqOtdEVgg+MA+ai4fWDEh u3qHJyLcQ9tbD2vvCnMXjtz6SyObxA==
"""
The additional section of the last query response should be
"""
- ai.example. 3600 IN A 192.0.2.9
- ai.example. 3600 IN AAAA 2001:db8::f00:baa9
- ns1.example. 3600 IN A 192.0.2.1
- ns2.example. 3600 IN A 192.0.2.2
- ai.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. hVe+wKYMlObTRPhX0NL67GxeZfdxqr/QeR6FtfdAj5+FgYxyzPEjIzvK Wy00hWIl6wD3Vws+rznEn8sQ64UdqA==
- ai.example. 3600 IN RRSIG AAAA 7 2 3600 20150420235959 20051021000000 40430 example. LcdxKaCB5bGZwPDg+3JJ4O02zoMBrjxqlf6WuaHQZZfTUpb9Nf2nxFGe 2XRPfR5tpJT6GdRGcHueLuXkMjBArQ==
- ns1.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. bu6kx73n6XEunoVGuRfAgY7EF/AJqHy7hj0jkiqJjB0dOrx3wuz9SaBe GfqWIdn/uta3SavN4FRvZR9SCFHF5Q==
- ns2.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. ktQ3TqE0CfRfki0Rb/Ip5BM0VnxelbuejCC4zpLbFKA/7eD7UNAwxMgx JPtbdST+syjYSJaj4IHfeX6n8vfoGA==
+ ai.example. 3600 IN A 192.0.2.9
+ ai.example. 3600 IN AAAA 2001:db8::f00:baa9
+ ns1.example. 3600 IN A 192.0.2.1
+ ns2.example. 3600 IN A 192.0.2.2
+ ai.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. hVe+wKYMlObTRPhX0NL67GxeZfdxqr/QeR6FtfdAj5+FgYxyzPEjIzvK Wy00hWIl6wD3Vws+rznEn8sQ64UdqA==
+ ai.example. 3600 IN RRSIG AAAA 7 2 3600 20150420235959 20051021000000 40430 example. LcdxKaCB5bGZwPDg+3JJ4O02zoMBrjxqlf6WuaHQZZfTUpb9Nf2nxFGe 2XRPfR5tpJT6GdRGcHueLuXkMjBArQ==
+ ns1.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. bu6kx73n6XEunoVGuRfAgY7EF/AJqHy7hj0jkiqJjB0dOrx3wuz9SaBe GfqWIdn/uta3SavN4FRvZR9SCFHF5Q==
+ ns2.example. 3600 IN RRSIG A 7 2 3600 20150420235959 20051021000000 40430 example. ktQ3TqE0CfRfki0Rb/Ip5BM0VnxelbuejCC4zpLbFKA/7eD7UNAwxMgx JPtbdST+syjYSJaj4IHfeX6n8vfoGA==
"""
Scenario: Wildcard other: Wildcard name itself nodata
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for *.w.example. type A should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -237,14 +369,26 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 AABBCCDD T644EBQK9BIBCNA874GIVR6JOJ62MLHV MX RRSIG
- r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN NSEC3 1 1 12 AABBCCDD T644EBQK9BIBCNA874GIVR6JOJ62MLHV MX RRSIG
+ r53bq7cc2uvmubfu5ocmm6pers9tk9en.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. aupviViruXs4bDg9rCbezzBMf9h1ZlDvbW/CZFKulIGXXLj8B/fsDJar XVDA9bnUoRhEbKp+HF1FWKW7RIJdtQ==
"""
Scenario: Direct query for NSEC3 record
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. type NSEC3 should have rcode NXDOMAIN
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -253,18 +397,30 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR NS SOA MX RRSIG DNSKEY NSEC3PARAM
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 AABBCCDD R53BQ7CC2UVMUBFU5OCMM6PERS9TK9EN A RRSIG
- q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
- gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN NSEC3 1 1 12 AABBCCDD JI6NEOAEPV8B5O6K4EV33ABHA8HT9FGC A HINFO AAAA RRSIG
- gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. IVnezTJ9iqblFF97vPSmfXZ5Zozngx3KX3byLTZC4QBH2dFWhf6scrGF ZB980AfCxoD9qbbKDy+rdGIeRSVNyw==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR NS SOA MX RRSIG DNSKEY NSEC3PARAM
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN NSEC3 1 1 12 AABBCCDD R53BQ7CC2UVMUBFU5OCMM6PERS9TK9EN A RRSIG
+ q04jkcevqvmu85r014c7dkba38o0ji5r.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. hV5I89b+4FHJDATp09g4bbN0R1F845CaXpL3ZxlMKimoPAyqletMlEWw LfFia7sdpSzn+ZlNNlkxWcLsIlMmUg==
+ gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN NSEC3 1 1 12 AABBCCDD JI6NEOAEPV8B5O6K4EV33ABHA8HT9FGC A HINFO AAAA RRSIG
+ gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. IVnezTJ9iqblFF97vPSmfXZ5Zozngx3KX3byLTZC4QBH2dFWhf6scrGF ZB980AfCxoD9qbbKDy+rdGIeRSVNyw==
"""
Scenario: No data, type DS, in-zone
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for ai.example. type DS should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -273,14 +429,26 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN NSEC3 1 1 12 AABBCCDD JI6NEOAEPV8B5O6K4EV33ABHA8HT9FGC A HINFO AAAA RRSIG
- gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. IVnezTJ9iqblFF97vPSmfXZ5Zozngx3KX3byLTZC4QBH2dFWhf6scrGF ZB980AfCxoD9qbbKDy+rdGIeRSVNyw==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN NSEC3 1 1 12 AABBCCDD JI6NEOAEPV8B5O6K4EV33ABHA8HT9FGC A HINFO AAAA RRSIG
+ gjeqe526plbf1g8mklp59enfd789njgi.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. IVnezTJ9iqblFF97vPSmfXZ5Zozngx3KX3byLTZC4QBH2dFWhf6scrGF ZB980AfCxoD9qbbKDy+rdGIeRSVNyw==
"""
Scenario: No data, type DS, optout delegation
Given I have bind10 running with configuration nsec3/nsec3_auth.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A dnssec query for c.example. type DS should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have edns_flags do
@@ -289,10 +457,10 @@ Feature: NSEC3 Authoritative service
The last query response should have adcount 1
The authority section of the last query response should be
"""
- example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
- example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR NS SOA MX RRSIG DNSKEY NSEC3PARAM
- 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 AABBCCDD B4UM86EGHHDS6NEA196SMVMLO4ORS995 NS DS RRSIG
- 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
+ example. 3600 IN SOA ns1.example. bugs.x.w.example. 1 3600 300 3600000 3600
+ example. 3600 IN RRSIG SOA 7 1 3600 20150420235959 20051021000000 40430 example. Hu25UIyNPmvPIVBrldN+9Mlp9Zql39qaUd8iq4ZLlYWfUUbbAS41pG+6 8z81q1xhkYAcEyHdVI2LmKusbZsT0Q==
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN NSEC3 1 1 12 AABBCCDD 2T7B4G4VSA5SMI47K61MV5BV1A22BOJR NS SOA MX RRSIG DNSKEY NSEC3PARAM
+ 0p9mhaveqvm6t7vbl5lop2u3t2rp3tom.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. OSgWSm26B+cS+dDL8b5QrWr/dEWhtCsKlwKLIBHYH6blRxK9rC0bMJPw Q4mLIuw85H2EY762BOCXJZMnpuwhpA==
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN NSEC3 1 1 12 AABBCCDD B4UM86EGHHDS6NEA196SMVMLO4ORS995 NS DS RRSIG
+ 35mthgpgcu1qg68fab165klnsnk3dpvl.example. 3600 IN RRSIG NSEC3 7 2 3600 20150420235959 20051021000000 40430 example. g6jPUUpduAJKRljUsN8gB4UagAX0NxY9shwQAynzo8EUWH+z6hEIBlUT PGj15eZll6VhQqgZXtAIR3chwgW+SA==
"""
diff --git a/tests/lettuce/features/queries.feature b/tests/lettuce/features/queries.feature
index b8f9b3d..f549f2d 100644
--- a/tests/lettuce/features/queries.feature
+++ b/tests/lettuce/features/queries.feature
@@ -5,6 +5,18 @@ Feature: Querying feature
Scenario: Repeated queries
Given I have bind10 running with configuration example.org.inmem.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A query for www.example.org should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have ancount 1
@@ -13,17 +25,17 @@ Feature: Querying feature
The answer section of the last query response should be
"""
- www.example.org. 3600 IN A 192.0.2.1
+ www.example.org. 3600 IN A 192.0.2.1
"""
The authority section of the last query response should be
"""
- example.org. 3600 IN NS ns1.example.org.
- example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
"""
The additional section of the last query response should be
"""
- ns1.example.org. 3600 IN A 192.0.2.3
- ns2.example.org. 3600 IN A 192.0.2.4
+ ns1.example.org. 3600 IN A 192.0.2.3
+ ns2.example.org. 3600 IN A 192.0.2.4
"""
# Repeat of the above
@@ -35,17 +47,17 @@ Feature: Querying feature
The answer section of the last query response should be
"""
- www.example.org. 3600 IN A 192.0.2.1
+ www.example.org. 3600 IN A 192.0.2.1
"""
The authority section of the last query response should be
"""
- example.org. 3600 IN NS ns1.example.org.
- example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
"""
The additional section of the last query response should be
"""
- ns1.example.org. 3600 IN A 192.0.2.3
- ns2.example.org. 3600 IN A 192.0.2.4
+ ns1.example.org. 3600 IN A 192.0.2.3
+ ns2.example.org. 3600 IN A 192.0.2.4
"""
# And now query something completely different
@@ -56,11 +68,23 @@ Feature: Querying feature
The last query response should have adcount 0
The authority section of the last query response should be
"""
- example.org. 3600 IN SOA ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200
+ example.org. 3600 IN SOA ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200
"""
Scenario: ANY query
Given I have bind10 running with configuration example.org.inmem.config
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+
+ bind10 module Auth should be running
+ And bind10 module Resolver should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
+
A query for example.org type ANY should have rcode NOERROR
The last query response should have flags qr aa rd
The last query response should have ancount 4
@@ -68,14 +92,30 @@ Feature: Querying feature
The last query response should have adcount 3
The answer section of the last query response should be
"""
- example.org. 3600 IN NS ns1.example.org.
- example.org. 3600 IN NS ns2.example.org.
- example.org. 3600 IN SOA ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200
- example.org. 3600 IN MX 10 mail.example.org.
+ example.org. 3600 IN NS ns1.example.org.
+ example.org. 3600 IN NS ns2.example.org.
+ example.org. 3600 IN SOA ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200
+ example.org. 3600 IN MX 10 mail.example.org.
+ """
+ The additional section of the last query response should be
+ """
+ ns1.example.org. 3600 IN A 192.0.2.3
+ ns2.example.org. 3600 IN A 192.0.2.4
+ mail.example.org. 3600 IN A 192.0.2.10
+ """
+ Scenario: Delegation query for unsigned child zone
+ Given I have bind10 running with configuration example.org.inmem.config
+ A dnssec query for www.sub.example.org type AAAA should have rcode NOERROR
+ The last query response should have flags qr rd
+ The last query response should have edns_flags do
+ The last query response should have ancount 0
+ The last query response should have nscount 1
+ The last query response should have adcount 2
+ The authority section of the last query response should be
+ """
+ sub.example.org. 3600 IN NS ns.sub.example.org.
"""
The additional section of the last query response should be
"""
- ns1.example.org. 3600 IN A 192.0.2.3
- ns2.example.org. 3600 IN A 192.0.2.4
- mail.example.org. 3600 IN A 192.0.2.10
+ ns.sub.example.org. 3600 IN A 192.0.2.101
"""
diff --git a/tests/lettuce/features/resolver_basic.feature b/tests/lettuce/features/resolver_basic.feature
index c759971..4092101 100644
--- a/tests/lettuce/features/resolver_basic.feature
+++ b/tests/lettuce/features/resolver_basic.feature
@@ -11,7 +11,17 @@ Feature: Basic Resolver
# to be revised (as it would then leak, which is probably true
# for any resolver system test)
When I start bind10 with configuration resolver/resolver_basic.config
- And wait for new bind10 stderr message RESOLVER_STARTED
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message RESOLVER_STARTED
+
+ bind10 module Resolver should be running
+ And bind10 module Auth should not be running
+ And bind10 module Xfrout should not be running
+ And bind10 module Zonemgr should not be running
+ And bind10 module Xfrin should not be running
+ And bind10 module Stats should not be running
+ And bind10 module StatsHttpd should not be running
# The ACL is set to reject any queries
A query for l.root-servers.net. should have rcode REFUSED
diff --git a/tests/lettuce/features/terrain/.gitignore b/tests/lettuce/features/terrain/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/tests/lettuce/features/terrain/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index b2a367c..c56afb7 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -14,10 +14,19 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from lettuce import *
+import time
import subprocess
import re
import json
+@step('sleep for (\d+) seconds')
+def wait_seconds(step, seconds):
+ """Sleep for some seconds.
+ Parameters:
+ seconds number of seconds to sleep for.
+ """
+ time.sleep(float(seconds))
+
@step('start bind10(?: with configuration (\S+))?' +\
'(?: with cmdctl port (\d+))?' +\
'(?: with msgq socket file (\S+))?' +\
@@ -100,18 +109,15 @@ def wait_for_xfrout(step, process_name):
def have_bind10_running(step, config_file, cmdctl_port, process_name):
"""
Compound convenience step for running bind10, which consists of
- start_bind10 and wait_for_auth.
+ start_bind10.
Currently only supports the 'with configuration' option.
"""
start_step = 'start bind10 with configuration ' + config_file
- wait_step = 'wait for bind10 auth to start'
if cmdctl_port is not None:
start_step += ' with cmdctl port ' + str(cmdctl_port)
if process_name is not None:
start_step += ' as ' + process_name
- wait_step = 'wait for bind10 auth of ' + process_name + ' to start'
step.given(start_step)
- step.given(wait_step)
# function to send lines to bindctl, and store the result
def run_bindctl(commands, cmdctl_port=None):
@@ -142,8 +148,8 @@ def run_bindctl(commands, cmdctl_port=None):
"stderr:\n" + str(stderr)
-@step('last bindctl( stderr)? output should( not)? contain (\S+)')
-def check_bindctl_output(step, stderr, notv, string):
+@step('last bindctl( stderr)? output should( not)? contain (\S+)( exactly)?')
+def check_bindctl_output(step, stderr, notv, string, exactly):
"""Checks the stdout (or stderr) stream of the last run of bindctl,
fails if the given string is not found in it (or fails if 'not' was
set and it is found).
@@ -151,14 +157,19 @@ def check_bindctl_output(step, stderr, notv, string):
stderr ('stderr'): Check stderr instead of stdout output
notv ('not'): reverse the check (fail if string is found)
string ('contain <string>') string to look for
+ exactly ('exactly'): Make an exact match delimited by whitespace
"""
if stderr is None:
output = world.last_bindctl_stdout
else:
output = world.last_bindctl_stderr
found = False
- if string in output:
- found = True
+ if exactly is None:
+ if string in output:
+ found = True
+ else:
+ if re.search(r'^\s+' + string + r'\s+', output, re.IGNORECASE | re.MULTILINE) is not None:
+ found = True
if notv is None:
assert found == True, "'" + string +\
"' was not found in bindctl output:\n" +\
@@ -322,4 +333,4 @@ def module_is_running(step, name, not_str):
if not_str is None:
not_str = ""
step.given('send bind10 the command help')
- step.given('last bindctl output should' + not_str + ' contain ' + name)
+ step.given('last bindctl output should' + not_str + ' contain ' + name + ' exactly')
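
A note on the 'exactly' match added above: it treats the module name as a whitespace-delimited token on its own line of bindctl's help output, so "Auth" no longer matches a line that merely contains it as part of a longer word. A minimal, self-contained sketch of that check in Python; the sample help text is only an assumption for illustration, not the real bindctl output format:

    import re

    # Hypothetical excerpt of bindctl "help" output; the real format may differ.
    sample_output = """
    Available module names:
        Auth      Authoritative DNS server
        Boss      Master process
    """

    def module_listed(output, name):
        # A plain substring test would also hit partial matches such as
        # "AuthServer"; anchoring on whitespace keeps the match exact.
        return re.search(r'^\s+' + name + r'\s+', output,
                         re.IGNORECASE | re.MULTILINE) is not None

    print(module_listed(sample_output, "Auth"))      # True
    print(module_listed(sample_output, "Resolver"))  # False
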
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
index c070dd2..a547014 100644
--- a/tests/lettuce/features/terrain/querying.py
+++ b/tests/lettuce/features/terrain/querying.py
@@ -201,7 +201,7 @@ class QueryResult(object):
pass
@step('A (dnssec )?query for ([\S]+) (?:type ([A-Z0-9]+) )?' +
- '(?:class ([A-Z]+) )?(?:to ([^:]+)(?::([0-9]+))? )?' +
+ '(?:class ([A-Z]+) )?(?:to ([^:]+|\[[0-9a-fA-F:]+\])(?::([0-9]+))? )?' +
'should have rcode ([\w.]+)')
def query(step, dnssec, query_name, qtype, qclass, addr, port, rcode):
"""
@@ -223,11 +223,17 @@ def query(step, dnssec, query_name, qtype, qclass, addr, port, rcode):
qclass = "IN"
if addr is None:
addr = "127.0.0.1"
+ addr = re.sub(r"\[(.+)\]", r"\1", addr) # convert [IPv6_addr] to IPv6_addr
if port is None:
port = 47806
additional_arguments = []
if dnssec is not None:
additional_arguments.append("+dnssec")
+ else:
+ # some builds of dig add edns0 by default. This could muck up
+ # additional counts, so unless we need dnssec, explicitly
+ # disable edns0
+ additional_arguments.append("+noedns")
query_result = QueryResult(query_name, qtype, qclass, addr, port,
additional_arguments)
assert query_result.rcode == rcode,\
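
Two small behaviours added in the querying step above are easy to miss: bracket-wrapped IPv6 addresses are accepted and unwrapped, and EDNS0 is explicitly disabled unless the query is a DNSSEC one, because some dig builds enable it by default and that would change the additional-section counts. A rough sketch of how a dig argument list could be assembled under those rules; the exact option set here is an assumption, not the harness's real invocation:

    import re

    def build_dig_args(query_name, qtype="A", addr="127.0.0.1",
                       port=47806, dnssec=False):
        # Accept "[::1]" as well as "::1"; dig itself wants the bare address.
        addr = re.sub(r"\[(.+)\]", r"\1", addr)
        args = ["dig", "-p", str(port), "@" + addr, query_name, qtype]
        if dnssec:
            args.append("+dnssec")
        else:
            # Keep additional counts predictable on digs that default to EDNS0.
            args.append("+noedns")
        return args

    print(build_dig_args("www.sub.example.org", "AAAA", addr="[::1]", dnssec=True))
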
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
index 4b199d6..8df0bae 100644
--- a/tests/lettuce/features/terrain/steps.py
+++ b/tests/lettuce/features/terrain/steps.py
@@ -30,12 +30,13 @@ def stop_a_named_process(step, process_name):
"""
world.processes.stop_process(process_name)
-@step('wait for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
-def wait_for_message(step, new, process_name, message, not_message):
+@step('wait (?:(\d+) times )?for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
+def wait_for_stderr_message(step, times, new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stderr
output.
Parameter:
+ times: Check for the string this many times.
new: (' new', optional): Only check the output printed since last time
this step was used for this process.
process_name ('<name> stderr'): Name of the process to check the output of.
@@ -46,16 +47,19 @@ def wait_for_message(step, new, process_name, message, not_message):
strings = [message]
if not_message is not None:
strings.append(not_message)
- (found, line) = world.processes.wait_for_stderr_str(process_name, strings, new)
+ if times is None:
+ times = 1
+ (found, line) = world.processes.wait_for_stderr_str(process_name, strings, new, int(times))
if not_message is not None:
assert found != not_message, line
-@step('wait for (new )?(\w+) stdout message (\w+)(?: not (\w+))?')
-def wait_for_message(step, process_name, message, not_message):
+@step('wait (?:(\d+) times )?for (new )?(\w+) stdout message (\w+)(?: not (\w+))?')
+def wait_for_stdout_message(step, times, new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stdout
output.
Parameter:
+ times: Check for the string this many times.
new: (' new', optional): Only check the output printed since last time
this step was used for this process.
process_name ('<name> stdout'): Name of the process to check the output of.
@@ -66,7 +70,9 @@ def wait_for_message(step, process_name, message, not_message):
strings = [message]
if not_message is not None:
strings.append(not_message)
- (found, line) = world.processes.wait_for_stdout_str(process_name, strings, new)
+ if times is None:
+ times = 1
+ (found, line) = world.processes.wait_for_stdout_str(process_name, strings, new, int(times))
if not_message is not None:
assert found != not_message, line
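
The reworked step definitions above give the stderr and stdout handlers distinct function names (both were previously called wait_for_message) and let a scenario ask for a message to appear an optional number of times. A quick sketch of what the extended regex captures from a step line; the step text itself is just an example:

    import re

    STEP = re.compile(r'wait (?:(\d+) times )?for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')

    m = STEP.match('wait 2 times for new bind10 stderr message XFRIN_TRANSFER_SUCCESS')
    print(m.groups())
    # ('2', 'new ', 'bind10', 'XFRIN_TRANSFER_SUCCESS', None)

    times, new, process_name, message, not_message = m.groups()
    times = 1 if times is None else int(times)
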
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
index 2bfddd6..753d912 100644
--- a/tests/lettuce/features/terrain/terrain.py
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -42,6 +42,8 @@ import time
# The first element is the original, the second is the target that will be
# used by the tests that need them
copylist = [
+ ["configurations/bindctl_commands.config.orig",
+ "configurations/bindctl_commands.config"],
["configurations/example.org.config.orig",
"configurations/example.org.config"],
["configurations/resolver/resolver_basic.config.orig",
@@ -164,7 +166,7 @@ class RunningProcess:
os.remove(self.stderr_filename)
os.remove(self.stdout_filename)
- def _wait_for_output_str(self, filename, running_file, strings, only_new):
+ def _wait_for_output_str(self, filename, running_file, strings, only_new, matches = 1):
"""
Wait for a line of output in this process. This will (if only_new is
False) first check all previous output from the process, and if not
@@ -178,18 +180,22 @@ class RunningProcess:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
+ matches: Check for the string this many times.
Returns a tuple containing the matched string, and the complete line
it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
+ match_count = 0
if not only_new:
full_file = open(filename, "r")
for line in full_file:
for string in strings:
if line.find(string) != -1:
- full_file.close()
- return (string, line)
+ match_count += 1
+ if match_count >= matches:
+ full_file.close()
+ return (string, line)
wait_count = 0
while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
where = running_file.tell()
@@ -197,42 +203,46 @@ class RunningProcess:
if line:
for string in strings:
if line.find(string) != -1:
- return (string, line)
+ match_count += 1
+ if match_count >= matches:
+ return (string, line)
else:
wait_count += 1
time.sleep(OUTPUT_WAIT_INTERVAL)
running_file.seek(where)
assert False, "Timeout waiting for process output: " + str(strings)
- def wait_for_stderr_str(self, strings, only_new = True):
+ def wait_for_stderr_str(self, strings, only_new = True, matches = 1):
"""
Wait for one of the given strings in this process's stderr output.
Parameters:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
+ matches: Check for the string this many times.
Returns a tuple containing the matched string, and the complete line
it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
return self._wait_for_output_str(self.stderr_filename, self.stderr,
- strings, only_new)
+ strings, only_new, matches)
- def wait_for_stdout_str(self, strings, only_new = True):
+ def wait_for_stdout_str(self, strings, only_new = True, matches = 1):
"""
Wait for one of the given strings in this process's stdout output.
Parameters:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
+ matches: Check for the string this many times.
Returns a tuple containing the matched string, and the complete line
it was found in.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
"""
return self._wait_for_output_str(self.stdout_filename, self.stdout,
- strings, only_new)
+ strings, only_new, matches)
# Container class for a number of running processes
# i.e. servers like bind10, etc
@@ -298,7 +308,7 @@ class RunningProcesses:
for process in self.processes.values():
process.remove_files_on_exit = False
- def wait_for_stderr_str(self, process_name, strings, only_new = True):
+ def wait_for_stderr_str(self, process_name, strings, only_new = True, matches = 1):
"""
Wait for one of the given strings in the given process's stderr output.
Parameters:
@@ -306,6 +316,7 @@ class RunningProcesses:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
+ matches: Check for the string this many times.
Returns the matched string.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
@@ -314,9 +325,10 @@ class RunningProcesses:
assert process_name in self.processes,\
"Process " + process_name + " unknown"
return self.processes[process_name].wait_for_stderr_str(strings,
- only_new)
+ only_new,
+ matches)
- def wait_for_stdout_str(self, process_name, strings, only_new = True):
+ def wait_for_stdout_str(self, process_name, strings, only_new = True, matches = 1):
"""
Wait for one of the given strings in the given process's stdout output.
Parameters:
@@ -324,6 +336,7 @@ class RunningProcesses:
strings: Array of strings to look for.
only_new: If true, only check output since last time this method was
called. If false, first check earlier output.
+ matches: Check for the string this many times.
Returns the matched string.
Fails if none of the strings was read after 10 seconds
(OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
@@ -332,7 +345,8 @@ class RunningProcesses:
assert process_name in self.processes,\
"Process " + process_name + " unknown"
return self.processes[process_name].wait_for_stdout_str(strings,
- only_new)
+ only_new,
+ matches)
@before.each_scenario
def initialize(scenario):
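
The matches parameter threaded through _wait_for_output_str and its wrappers above turns the single-occurrence wait into a counted one. Stripped of the tail-follow and timeout machinery of the real helper, the counting idea looks roughly like this simplified sketch:

    def count_matches(filename, strings, matches=1):
        """Return (string, line) once any of the given strings has been seen
        a total of 'matches' times in the file, or None if it never is."""
        match_count = 0
        with open(filename, "r") as f:
            for line in f:
                for string in strings:
                    if line.find(string) != -1:
                        match_count += 1
                        if match_count >= matches:
                            return (string, line)
        return None

    # e.g. wait until AUTH_SERVER_STARTED has appeared twice in a stderr log
    # (the log file name is a placeholder):
    # count_matches("bind10_stderr.log", ["AUTH_SERVER_STARTED"], matches=2)
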
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
index 8bc6e5e..69043d8 100644
--- a/tests/lettuce/features/xfrin_bind10.feature
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -3,9 +3,21 @@ Feature: Xfrin
Scenario: Retransfer command
Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master
+ And wait for master stderr message BIND10_STARTED_CC
+ And wait for master stderr message CMDCTL_STARTED
+ And wait for master stderr message AUTH_SERVER_STARTED
+ And wait for master stderr message XFROUT_STARTED
+ And wait for master stderr message ZONEMGR_STARTED
+
And I have bind10 running with configuration xfrin/retransfer_slave.conf
+ And wait for bind10 stderr message BIND10_STARTED_CC
+ And wait for bind10 stderr message CMDCTL_STARTED
+ And wait for bind10 stderr message AUTH_SERVER_STARTED
+ And wait for bind10 stderr message XFRIN_STARTED
+ And wait for bind10 stderr message ZONEMGR_STARTED
+
A query for www.example.org should have rcode REFUSED
Wait for bind10 stderr message CMDCTL_STARTED
- When I send bind10 the command Xfrin retransfer example.org IN 127.0.0.1 47807
+ When I send bind10 the command Xfrin retransfer example.org IN ::1 47807
Then wait for new bind10 stderr message XFRIN_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
A query for www.example.org should have rcode NOERROR
diff --git a/tests/lettuce/setup_intree_bind10.sh.in b/tests/lettuce/setup_intree_bind10.sh.in
index b1f17bc..9d72778 100755
--- a/tests/lettuce/setup_intree_bind10.sh.in
+++ b/tests/lettuce/setup_intree_bind10.sh.in
@@ -23,7 +23,7 @@ BIND10_PATH=@abs_top_builddir@/src/bin/bind10
PATH=@abs_top_builddir@/src/bin/bind10:@abs_top_builddir@/src/bin/bindctl:@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/ddns:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
export PATH
-PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
+PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs:$PYTHONPATH
export PYTHONPATH
# If necessary (rare cases), explicitly specify paths to dynamic libraries
diff --git a/tests/system/bindctl/nsx1/.gitignore b/tests/system/bindctl/nsx1/.gitignore
index 84432f2..4a8ce05 100644
--- a/tests/system/bindctl/nsx1/.gitignore
+++ b/tests/system/bindctl/nsx1/.gitignore
@@ -1 +1,3 @@
/b10-config.db.template
+/bind10.run
+/bindctl.out
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index cb9d8be..352642e 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -32,7 +32,18 @@ cnt_value1=0
cnt_value2=0
cnt_value3=0
-echo "I:Checking b10-auth is working by default ($n)"
+echo "I:Checking b10-auth is disabled by default ($n)"
+$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A > /dev/null && status=1
+if [ $status != 0 ]; then echo "I:failed"; fi
+n=`expr $n + 1`
+
+echo "I:Starting b10-auth and checking that it works ($n)"
+echo 'config add Boss/components b10-auth
+config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
+config commit
+quit
+' | $RUN_BINDCTL \
+ --csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
# perform a simple check on the output (digcomp would be too much for this)
grep 192.0.2.1 dig.out.$n > /dev/null || status=1
@@ -46,9 +57,9 @@ sleep 2
echo 'Stats show
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
-# the server should have received 1 UDP and 1 TCP queries (TCP query was
-# sent from the server startup script)
-cnt_value1=`expr $cnt_value1 + 1`
+# the server should have received 1 UDP and 0 TCP queries (the server
+# startup script no longer sends any TCP queries)
+cnt_value1=`expr $cnt_value1 + 0`
cnt_value2=`expr $cnt_value2 + 1`
cnt_value3=`expr $cnt_value1 + $cnt_value2`
grep $cnt_name1".*\<"$cnt_value1"\>" bindctl.out.$n > /dev/null || status=1
@@ -64,7 +75,7 @@ quit
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
# dig should exit with a failure code.
-$DIG +tcp +norec @10.53.0.1 -p 53210 ns.example.com. A && status=1
+$DIG +tcp +norec @10.53.0.1 -p 53210 ns.example.com. A > /dev/null && status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
@@ -76,6 +87,7 @@ quit
' | $RUN_BINDCTL \
--csv-file-dir=$BINDCTL_CSV_DIR 2>&1 > /dev/null || status=1
$DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
+# perform a simple check on the output (digcomp would be too much for this)
grep 192.0.2.1 dig.out.$n > /dev/null || status=1
if [ $status != 0 ]; then echo "I:failed"; fi
n=`expr $n + 1`
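
Since b10-auth is no longer started by default, the test first has to add it to the Boss component list through bindctl, as the shell pipeline above does. The same sequence driven from Python, in the style of run_bindctl() from the lettuce terrain; the bindctl path and CSV directory below are placeholders, not the values the system tests pass in:

    import subprocess

    BINDCTL = "bindctl"               # placeholder; the tests use $RUN_BINDCTL
    CSV_DIR = "/tmp/bindctl-csv"      # placeholder; the tests use $BINDCTL_CSV_DIR

    commands = "\n".join([
        'config add Boss/components b10-auth',
        'config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }',
        'config commit',
        'quit',
    ]) + "\n"

    proc = subprocess.Popen([BINDCTL, "--csv-file-dir=" + CSV_DIR],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(commands.encode())
    print("bindctl exited with", proc.returncode)
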
diff --git a/tests/system/glue/nsx1/.gitignore b/tests/system/glue/nsx1/.gitignore
index 1c67281..c0750b3 100644
--- a/tests/system/glue/nsx1/.gitignore
+++ b/tests/system/glue/nsx1/.gitignore
@@ -1 +1,3 @@
/b10-config.db
+/bind10.run
+/bindctl.out
diff --git a/tests/system/glue/nsx1/b10-config.db.in b/tests/system/glue/nsx1/b10-config.db.in
index 0d5a324..660183b 100644
--- a/tests/system/glue/nsx1/b10-config.db.in
+++ b/tests/system/glue/nsx1/b10-config.db.in
@@ -2,5 +2,15 @@
"Auth": {
"listen_on": [{"address": "10.53.0.1", "port": 53210}],
"database_file": "@abs_builddir@/zone.sqlite3"
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": {"kind": "needed", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/system/ixfr/b10-config.db.in b/tests/system/ixfr/b10-config.db.in
index 946d80f..156c959 100644
--- a/tests/system/ixfr/b10-config.db.in
+++ b/tests/system/ixfr/b10-config.db.in
@@ -19,5 +19,15 @@
"name": "example.",
"class": "IN"
}]
+ },
+ "Boss": {
+ "components": {
+ "b10-auth": {"kind": "needed", "special": "auth" },
+ "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+ "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+ "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+ "b10-stats": { "address": "Stats", "kind": "dispensable" },
+ "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+ }
}
}
diff --git a/tests/system/ixfr/in-2/ns1/.gitignore b/tests/system/ixfr/in-2/ns1/.gitignore
new file mode 100644
index 0000000..35ae1cb
--- /dev/null
+++ b/tests/system/ixfr/in-2/ns1/.gitignore
@@ -0,0 +1 @@
+/named.run
diff --git a/tests/system/ixfr/in-2/nsx2/.gitignore b/tests/system/ixfr/in-2/nsx2/.gitignore
new file mode 100644
index 0000000..d31eb18
--- /dev/null
+++ b/tests/system/ixfr/in-2/nsx2/.gitignore
@@ -0,0 +1 @@
+/bindctl.out
diff --git a/tests/system/start.pl b/tests/system/start.pl
index daa4577..32284de 100755
--- a/tests/system/start.pl
+++ b/tests/system/start.pl
@@ -53,6 +53,8 @@ if ($server && !-d "$test/$server") {
my $topdir = abs_path("$test/..");
my $testdir = abs_path("$test");
my $RUN_BIND10 = $ENV{'RUN_BIND10'};
+my $RUN_BINDCTL = $ENV{'RUN_BINDCTL'};
+my $BINDCTL_CSV_DIR = $ENV{'BINDCTL_CSV_DIR'};
my $NAMED = $ENV{'BIND9_NAMED'};
my $LWRESD = $ENV{'LWRESD'};
my $DIG = $ENV{'DIG'};
@@ -211,14 +213,15 @@ sub verify_server {
my $tries = 0;
while (1) {
- my $return = system("$DIG +tcp +noadd +nosea +nostat +noquest +nocomm +nocmd -p 53210 version.bind. chaos txt \@10.53.0.$n > dig.out");
+ my $return = system("echo \"Stats show\" | $RUN_BINDCTL --csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out");
last if ($return == 0);
- print `grep ";" dig.out`;
if (++$tries >= 30) {
print "I:no response from $server\n";
print "R:FAIL\n";
system("$PERL $topdir/stop.pl $testdir");
exit 1;
+ } else {
+ print "I:no response from $server. retrying.\n";
}
sleep 2;
}
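
Finally, verify_server in start.pl now polls the running system with a bindctl "Stats show" instead of a dig against b10-auth, since auth may not be listening yet (or at all). A hedged Python rendering of that retry loop, with the retry count and delay taken from the Perl code; the bindctl path and CSV directory are again placeholders:

    import subprocess
    import time

    def verify_server(run_bindctl, csv_dir, tries=30, delay=2):
        """Poll b10-stats via bindctl until it answers, mirroring start.pl."""
        for _ in range(tries):
            proc = subprocess.Popen([run_bindctl, "--csv-file-dir=" + csv_dir],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate(b"Stats show\n")
            if proc.returncode == 0:
                return True
            print("I:no response from server. retrying.")
            time.sleep(delay)
        print("I:no response from server")
        return False
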